author    Dimitry Andric <dim@FreeBSD.org>  2011-02-20 13:06:31 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2011-02-20 13:06:31 +0000
commit    bca07a4524feb4edec581062d631a13116320a24 (patch)
tree      a9243275843fbeaa590afc07ee888e006b8d54ea /lib/CodeGen
parent    998bc5802ecdd65ce3b270f6c69a8ae8557f0a10 (diff)
Vendor import of clang trunk r126079 (tag: vendor/clang/clang-r126079)
Notes:
    svn path=/vendor/clang/dist/; revision=218887
    svn path=/vendor/clang/clang-r126079/; revision=218888; tag=vendor/clang/clang-r126079
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h  31
-rw-r--r--  lib/CodeGen/BackendUtil.cpp  42
-rw-r--r--  lib/CodeGen/CGBlocks.cpp  1806
-rw-r--r--  lib/CodeGen/CGBlocks.h  253
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp  680
-rw-r--r--  lib/CodeGen/CGCXX.cpp  251
-rw-r--r--  lib/CodeGen/CGCXX.h  36
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp  174
-rw-r--r--  lib/CodeGen/CGCXXABI.h  28
-rw-r--r--  lib/CodeGen/CGCall.cpp  157
-rw-r--r--  lib/CodeGen/CGClass.cpp  188
-rw-r--r--  lib/CodeGen/CGCleanup.cpp  1144
-rw-r--r--  lib/CodeGen/CGCleanup.h  560
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp  1006
-rw-r--r--  lib/CodeGen/CGDebugInfo.h  56
-rw-r--r--  lib/CodeGen/CGDecl.cpp  349
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp  194
-rw-r--r--  lib/CodeGen/CGException.cpp  442
-rw-r--r--  lib/CodeGen/CGException.h  558
-rw-r--r--  lib/CodeGen/CGExpr.cpp  662
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp  480
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp  488
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp  293
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp  286
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp  937
-rw-r--r--  lib/CodeGen/CGObjC.cpp  675
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp  107
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp  252
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h  12
-rw-r--r--  lib/CodeGen/CGRTTI.cpp  276
-rw-r--r--  lib/CodeGen/CGRecordLayout.h  69
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp  613
-rw-r--r--  lib/CodeGen/CGStmt.cpp  228
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp  70
-rw-r--r--  lib/CodeGen/CGVTT.cpp  88
-rw-r--r--  lib/CodeGen/CGVTables.cpp  262
-rw-r--r--  lib/CodeGen/CGVTables.h  126
-rw-r--r--  lib/CodeGen/CGValue.h  132
-rw-r--r--  lib/CodeGen/CMakeLists.txt  13
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp  64
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp  899
-rw-r--r--  lib/CodeGen/CodeGenFunction.h  815
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp  527
-rw-r--r--  lib/CodeGen/CodeGenModule.h  168
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp  180
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h  76
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp  69
-rw-r--r--  lib/CodeGen/CodeGenTypes.h  20
-rw-r--r--  lib/CodeGen/GlobalDecl.h  8
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp  309
-rw-r--r--  lib/CodeGen/Mangle.cpp  2515
-rw-r--r--  lib/CodeGen/Mangle.h  177
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp  1178
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp  13
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  414
-rw-r--r--  lib/CodeGen/TargetInfo.h  9
56 files changed, 10696 insertions, 10769 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 91b7742557f5..ce1039849b7c 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -70,11 +70,12 @@ namespace clang {
Kind TheKind;
llvm::PATypeHolder TypeData;
unsigned UIntData;
- bool BoolData;
+ bool BoolData0;
+ bool BoolData1;
ABIArgInfo(Kind K, const llvm::Type *TD=0,
- unsigned UI=0, bool B = false)
- : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+ unsigned UI=0, bool B0 = false, bool B1 = false)
+ : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
@@ -88,8 +89,9 @@ namespace clang {
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
- static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal);
+ static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
+ , bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
}
static ABIArgInfo getExpand() {
return ABIArgInfo(Expand);
@@ -105,7 +107,7 @@ namespace clang {
bool canHaveCoerceToType() const {
return TheKind == Direct || TheKind == Extend;
}
-
+
// Direct/Extend accessors
unsigned getDirectOffset() const {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
@@ -115,12 +117,12 @@ namespace clang {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
-
+
void setCoerceToType(const llvm::Type *T) {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
-
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
@@ -129,9 +131,14 @@ namespace clang {
bool getIndirectByVal() const {
assert(TheKind == Indirect && "Invalid kind!");
- return BoolData;
+ return BoolData0;
}
-
+
+ bool getIndirectRealign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData1;
+ }
+
void dump() const;
};
@@ -140,10 +147,10 @@ namespace clang {
class ABIInfo {
public:
CodeGen::CodeGenTypes &CGT;
-
+
ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
virtual ~ABIInfo();
-
+
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::TargetData &getTargetData() const;
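For illustration, a target ABI implementation might use the new Realign flag roughly as follows. This is a minimal sketch: the classifyArgumentType function and the 128-bit size threshold are hypothetical, and only the ABIArgInfo calls correspond to the interface changed above.

    // Hypothetical classifier; only the ABIArgInfo API is from this change.
    ABIArgInfo classifyArgumentType(QualType Ty, ASTContext &Ctx) {
      // Pass large aggregates indirectly, as a pointer to a byval copy...
      if (Ctx.getTypeSize(Ty) > 128)
        // ...and request realignment when the type's natural alignment
        // exceeds what the ABI guarantees for byval stack arguments.
        return ABIArgInfo::getIndirect(/*Alignment=*/16, /*ByVal=*/true,
                                       /*Realign=*/true);
      return ABIArgInfo::getDirect();
    }

Code generation can then consult getIndirectByVal() and the new getIndirectRealign() when lowering such an argument.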
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 69efe438cc9a..9897b1b1a028 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -26,6 +26,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
@@ -42,14 +43,14 @@ class EmitAssemblyHelper {
Timer CodeGenerationTime;
- mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *CodeGenPasses;
mutable PassManager *PerModulePasses;
mutable FunctionPassManager *PerFunctionPasses;
private:
- FunctionPassManager *getCodeGenPasses() const {
+ PassManager *getCodeGenPasses() const {
if (!CodeGenPasses) {
- CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses = new PassManager();
CodeGenPasses->add(new TargetData(TheModule));
}
return CodeGenPasses;
@@ -107,14 +108,22 @@ void EmitAssemblyHelper::CreatePasses() {
OptLevel = 0;
Inlining = CodeGenOpts.NoInlining;
}
+
+ FunctionPassManager *FPM = getPerFunctionPasses();
+
+ TargetLibraryInfo *TLI =
+ new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ FPM->add(TLI);
// In -O0 if checking is disabled, we don't even have per-function passes.
if (CodeGenOpts.VerifyModule)
- getPerFunctionPasses()->add(createVerifierPass());
+ FPM->add(createVerifierPass());
// Assume that standard function passes aren't run for -O0.
if (OptLevel > 0)
- llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+ llvm::createStandardFunctionPasses(FPM, OptLevel);
llvm::Pass *InliningPass = 0;
switch (Inlining) {
@@ -136,8 +145,15 @@ void EmitAssemblyHelper::CreatePasses() {
break;
}
+ PassManager *MPM = getPerModulePasses();
+
+ TLI = new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ MPM->add(TLI);
+
// For now we always create per module passes.
- llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ llvm::createStandardModulePasses(MPM, OptLevel,
CodeGenOpts.OptimizeSize,
CodeGenOpts.UnitAtATime,
CodeGenOpts.UnrollLoops,
@@ -183,7 +199,11 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
llvm::FloatABIType = llvm::FloatABI::Default;
}
+ llvm::LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ llvm::NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ llvm::NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
UnwindTablesMandatory = CodeGenOpts.UnwindTables;
@@ -248,7 +268,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCRelaxAll(true);
// Create the code generator passes.
- FunctionPassManager *PM = getCodeGenPasses();
+ PassManager *PM = getCodeGenPasses();
CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
switch (CodeGenOpts.OptimizationLevel) {
@@ -320,13 +340,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
if (CodeGenPasses) {
PrettyStackTraceString CrashInfo("Code generation");
-
- CodeGenPasses->doInitialization();
- for (Module::iterator I = TheModule->begin(),
- E = TheModule->end(); I != E; ++I)
- if (!I->isDeclaration())
- CodeGenPasses->run(*I);
- CodeGenPasses->doFinalization();
+ CodeGenPasses->run(*TheModule);
}
}
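Condensed into a standalone sketch, the new pass setup amounts to the following (LLVM 2.9-era API; the helper function and its SimplifyLibCalls parameter are invented for the example, but each call mirrors one made in the diff):

    #include "llvm/PassManager.h"
    #include "llvm/ADT/Triple.h"
    #include "llvm/Target/TargetData.h"
    #include "llvm/Target/TargetLibraryInfo.h"

    static void runCodeGenPasses(llvm::Module &M, bool SimplifyLibCalls) {
      llvm::PassManager PM;              // module-level, not per-function
      PM.add(new llvm::TargetData(&M));  // target data layout for this module

      // Tell optimizations which library calls may be assumed to exist;
      // under -fno-builtin (SimplifyLibCalls == false), none may be.
      llvm::TargetLibraryInfo *TLI =
          new llvm::TargetLibraryInfo(llvm::Triple(M.getTargetTriple()));
      if (!SimplifyLibCalls)
        TLI->disableAllFunctions();
      PM.add(TLI);

      // A single run over the whole module replaces the old
      // doInitialization / per-function run / doFinalization loop.
      PM.run(M);
    }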
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 04f1ef24b297..a35648d23d51 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
@@ -24,397 +25,631 @@
using namespace clang;
using namespace CodeGen;
-CGBlockInfo::CGBlockInfo(const char *N)
- : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
+CGBlockInfo::CGBlockInfo(const BlockExpr *blockExpr, const char *N)
+ : Name(N), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), StructureType(0), Block(blockExpr) {
// Skip asm prefix, if any.
if (Name && Name[0] == '\01')
++Name;
}
+/// Build the given block as a global block.
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn);
-llvm::Constant *CodeGenFunction::
-BuildDescriptorBlockDecl(const BlockExpr *BE, const CGBlockInfo &Info,
- const llvm::StructType* Ty,
- llvm::Constant *BlockVarLayout,
- std::vector<HelperInfo> *NoteForHelper) {
- bool BlockHasCopyDispose = Info.BlockHasCopyDispose;
- CharUnits Size = Info.BlockSize;
- const llvm::Type *UnsignedLongTy
- = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
- llvm::Constant *C;
- std::vector<llvm::Constant*> Elts;
+/// Build the helper function to copy a block.
+static llvm::Constant *buildCopyHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
+}
+
+/// Build the helper function to dispose of a block.
+static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
+}
+
+/// Build the block descriptor constant for a block.
+static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ ASTContext &C = CGM.getContext();
+
+ const llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+
+ llvm::SmallVector<llvm::Constant*, 6> elements;
// reserved
- C = llvm::ConstantInt::get(UnsignedLongTy, 0);
- Elts.push_back(C);
+ elements.push_back(llvm::ConstantInt::get(ulong, 0));
// Size
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. Better fix would be to change the
// API to size_t.
- C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
- Elts.push_back(C);
+ elements.push_back(llvm::ConstantInt::get(ulong,
+ blockInfo.BlockSize.getQuantity()));
- // optional copy/dispose helpers
- if (BlockHasCopyDispose) {
+ // Optional copy/dispose helpers.
+ if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
- Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+ elements.push_back(buildCopyHelper(CGM, blockInfo));
// destroy_func_decl
- Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ elements.push_back(buildDisposeHelper(CGM, blockInfo));
}
- // Signature. non-optional ObjC-style method descriptor @encode sequence
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- Elts.push_back(llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
+ // Signature. Mandatory ObjC-style method descriptor @encode sequence.
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
- // Layout.
- C = BlockVarLayout;
-
- Elts.push_back(C);
+ // GC layout.
+ if (C.getLangOptions().ObjC1)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(llvm::Constant::getNullValue(i8p));
+
+ llvm::Constant *init =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(), elements.data(),
+ elements.size(), false);
- C = llvm::ConstantStruct::get(VMContext, Elts, false);
+ llvm::GlobalVariable *global =
+ new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ init, "__block_descriptor_tmp");
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- C, "__block_descriptor_tmp");
- return C;
+ return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
-static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
- for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
- I != E; ++I)
- if (*I)
- CollectBlockDeclRefInfo(*I, Info);
-
- // We want to ensure we walk down into block literals so we can find
- // all nested BlockDeclRefExprs.
- if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
- Info.InnerBlocks.insert(BE->getBlockDecl());
- CollectBlockDeclRefInfo(BE->getBody(), Info);
- }
+static BlockFlags computeBlockFlag(CodeGenModule &CGM,
+ const BlockExpr *BE,
+ BlockFlags flags) {
+ const FunctionType *ftype = BE->getFunctionType();
+
+ // This is a bit overboard.
+ CallArgList args;
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().getFunctionInfo(ftype->getResultType(), args,
+ ftype->getExtInfo());
+
+ if (CGM.ReturnTypeUsesSRet(fnInfo))
+ flags |= BLOCK_USE_STRET;
- else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
- const ValueDecl *D = BDRE->getDecl();
- // FIXME: Handle enums.
- if (isa<FunctionDecl>(D))
- return;
-
- if (isa<ImplicitParamDecl>(D) &&
- isa<ObjCMethodDecl>(D->getDeclContext()) &&
- cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
- Info.NeedsObjCSelf = true;
- return;
+ return flags;
+}
+
+/*
+ Purely notional variadic template describing the layout of a block.
+
+ template <class _ResultType, class... _ParamTypes, class... _CaptureTypes>
+ struct Block_literal {
+ /// Initialized to one of:
+ /// extern void *_NSConcreteStackBlock[];
+ /// extern void *_NSConcreteGlobalBlock[];
+ ///
+ /// In theory, we could start one off malloc'ed by setting
+ /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using
+ /// this isa:
+ /// extern void *_NSConcreteMallocBlock[];
+ struct objc_class *isa;
+
+ /// These are the flags (with corresponding bit number) that the
+ /// compiler is actually supposed to know about.
+ /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
+ /// descriptor provides copy and dispose helper functions
+ /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
+ /// object with a nontrivial destructor or copy constructor
+ /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated
+ /// as global memory
+ /// 29. BLOCK_USE_STRET - indicates that the block function
+ /// uses stret, which objc_msgSend needs to know about
+ /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an
+ /// @encoded signature string
+ /// And we're not supposed to manipulate these:
+ /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved
+ /// to malloc'ed memory
+ /// 27. BLOCK_IS_GC - indicates that the block has been moved to
+ /// to GC-allocated memory
+ /// Additionally, the bottom 16 bits are a reference count which
+ /// should be zero on the stack.
+ int flags;
+
+ /// Reserved; should be zero-initialized.
+ int reserved;
+
+ /// Function pointer generated from block literal.
+ _ResultType (*invoke)(Block_literal *, _ParamTypes...);
+
+ /// Block description metadata generated from block literal.
+ struct Block_descriptor *block_descriptor;
+
+ /// Captured values follow.
+ _CaptureTypes captures...;
+ };
+ */
+
+/// The number of fields in a block header.
+const unsigned BlockHeaderSize = 5;
+
+namespace {
+ /// A chunk of data that we actually have to capture in the block.
+ struct BlockLayoutChunk {
+ CharUnits Alignment;
+ CharUnits Size;
+ const BlockDecl::Capture *Capture; // null for 'this'
+ const llvm::Type *Type;
+
+ BlockLayoutChunk(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture,
+ const llvm::Type *type)
+ : Alignment(align), Size(size), Capture(capture), Type(type) {}
+
+ /// Tell the block info that this chunk has the given field index.
+ void setIndex(CGBlockInfo &info, unsigned index) {
+ if (!Capture)
+ info.CXXThisIndex = index;
+ else
+ info.Captures[Capture->getVariable()]
+ = CGBlockInfo::Capture::makeIndex(index);
}
+ };
- // Only Decls that escape are added.
- if (!Info.InnerBlocks.count(D->getDeclContext()))
- Info.DeclRefs.push_back(BDRE);
+ /// Order by descending alignment.
+ bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
+ return left.Alignment > right.Alignment;
}
+}
- // Make sure to capture implicit 'self' references due to super calls.
- else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
- if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
- E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
- Info.NeedsObjCSelf = true;
+/// Determines if the given record type has a mutable field.
+static bool hasMutableField(const CXXRecordDecl *record) {
+ for (CXXRecordDecl::field_iterator
+ i = record->field_begin(), e = record->field_end(); i != e; ++i)
+ if ((*i)->isMutable())
+ return true;
+
+ for (CXXRecordDecl::base_class_const_iterator
+ i = record->bases_begin(), e = record->bases_end(); i != e; ++i) {
+ const RecordType *record = i->getType()->castAs<RecordType>();
+ if (hasMutableField(cast<CXXRecordDecl>(record->getDecl())))
+ return true;
}
- // Getter/setter uses may also cause implicit super references,
- // which we can check for with:
- else if (isa<ObjCSuperExpr>(S))
- Info.NeedsObjCSelf = true;
+ return false;
+}
+
+/// Determines if the given type is safe for constant capture in C++.
+static bool isSafeForCXXConstantCapture(QualType type) {
+ const RecordType *recordType =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>();
- else if (isa<CXXThisExpr>(S))
- Info.CXXThisRef = cast<CXXThisExpr>(S);
+ // Only records can be unsafe.
+ if (!recordType) return true;
+
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(recordType->getDecl());
+
+ // Maintain semantics for classes with non-trivial dtors or copy ctors.
+ if (!record->hasTrivialDestructor()) return false;
+ if (!record->hasTrivialCopyConstructor()) return false;
+
+ // Otherwise, we just have to make sure there aren't any mutable
+ // fields that might have changed since initialization.
+ return !hasMutableField(record);
}
-/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
-/// declared as a global variable instead of on the stack.
-static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
- return Info.DeclRefs.empty();
+/// It is illegal to modify a const object after initialization.
+/// Therefore, if a const object has a constant initializer, we don't
+/// actually need to keep storage for it in the block; we'll just
+/// rematerialize it at the start of the block function. This is
+/// acceptable because we make no promises about address stability of
+/// captured variables.
+static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ const VarDecl *var) {
+ QualType type = var->getType();
+
+ // We can only do this if the variable is const.
+ if (!type.isConstQualified()) return 0;
+
+ // Furthermore, in C++ we have to worry about mutable fields:
+ // C++ [dcl.type.cv]p4:
+ // Except that any class member declared mutable can be
+ // modified, any attempt to modify a const object during its
+ // lifetime results in undefined behavior.
+ if (CGM.getLangOptions().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ return 0;
+
+ // If the variable doesn't have any initializer (shouldn't this be
+ // invalid?), it's not clear what we should do. Maybe capture as
+ // zero?
+ const Expr *init = var->getInit();
+ if (!init) return 0;
+
+ return CGM.EmitConstantExpr(init, var->getType());
}
-/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
-/// ensure we can generate the debug information for the parameter for the block
-/// invoke function.
-static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
- if (Info.CXXThisRef)
- CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);
-
- for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
- CGF.AllocateBlockDecl(Info.DeclRefs[i]);
-
- if (Info.NeedsObjCSelf) {
- ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
- BlockDeclRefExpr *BDRE =
- new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
- SourceLocation(), false);
- Info.DeclRefs.push_back(BDRE);
- CGF.AllocateBlockDecl(BDRE);
- }
+/// Get the low bit of a nonzero character count. This is the
+/// alignment of the nth byte if the 0th byte is universally aligned.
+static CharUnits getLowBit(CharUnits v) {
+ return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1));
}
-static unsigned computeBlockFlag(CodeGenModule &CGM,
- const BlockExpr *BE, unsigned flags) {
- QualType BPT = BE->getType();
- const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
- QualType ResultType = ftype->getResultType();
-
- CallArgList Args;
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
- FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSRet(FnInfo))
- flags |= CodeGenFunction::BLOCK_USE_STRET;
- return flags;
+static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
+ std::vector<const llvm::Type*> &elementTypes) {
+ ASTContext &C = CGM.getContext();
+
+ // The header is basically a 'struct { void *; int; int; void *; void *; }'.
+ CharUnits ptrSize, ptrAlign, intSize, intAlign;
+ llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
+ llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
+
+ // Are there crazy embedded platforms where this isn't true?
+ assert(intSize <= ptrSize && "layout assumptions horribly violated");
+
+ CharUnits headerSize = ptrSize;
+ if (2 * intSize < ptrAlign) headerSize += ptrSize;
+ else headerSize += 2 * intSize;
+ headerSize += 2 * ptrSize;
+
+ info.BlockAlign = ptrAlign;
+ info.BlockSize = headerSize;
+
+ assert(elementTypes.empty());
+ const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ const llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.getBlockDescriptorType());
+
+ assert(elementTypes.size() == BlockHeaderSize);
}
-// FIXME: Push most into CGM, passing down a few bits, like current function
-// name.
-llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
- std::string Name = CurFn->getName();
- CGBlockInfo Info(Name.c_str());
- Info.InnerBlocks.insert(BE->getBlockDecl());
- CollectBlockDeclRefInfo(BE->getBody(), Info);
+/// Compute the layout of the given block. Attempts to lay the block
+/// out with minimal space requirements.
+static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *block = info.getBlockDecl();
- // Check if the block can be global.
- // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
- // to just have one code path. We should move this function into CGM and pass
- // CGF, then we can just check to see if CGF is 0.
- if (0 && CanBlockBeGlobal(Info))
- return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+ std::vector<const llvm::Type*> elementTypes;
+ initializeForBlockHeader(CGM, info, elementTypes);
- size_t BlockFields = 5;
+ if (!block->hasCaptures()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
- std::vector<llvm::Constant*> Elts(BlockFields);
+ // Collect the layout chunks.
+ llvm::SmallVector<BlockLayoutChunk, 16> layout;
+ layout.reserve(block->capturesCXXThis() +
+ (block->capture_end() - block->capture_begin()));
- llvm::Constant *C;
- llvm::Value *V;
+ CharUnits maxFieldAlign;
- {
- llvm::Constant *BlockVarLayout;
- // C = BuildBlockStructInitlist();
- unsigned int flags = BLOCK_HAS_SIGNATURE;
+ // First, 'this'.
+ if (block->capturesCXXThis()) {
+ const DeclContext *DC = block->getDeclContext();
+ for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
+ ;
+ QualType thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
- // We run this first so that we set BlockHasCopyDispose from the entire
- // block literal.
- // __invoke
- llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
- BlockVarLayout,
- LocalDeclMap);
- BlockHasCopyDispose |= Info.BlockHasCopyDispose;
- Elts[3] = Fn;
-
- // FIXME: Don't use BlockHasCopyDispose, it is set more often then
- // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
- if (Info.BlockHasCopyDispose)
- flags |= BLOCK_HAS_COPY_DISPOSE;
-
- // __isa
- C = CGM.getNSConcreteStackBlock();
- C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
- Elts[0] = C;
-
- // __flags
- flags = computeBlockFlag(CGM, BE, flags);
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().IntTy));
- C = llvm::ConstantInt::get(IntTy, flags);
- Elts[1] = C;
-
- // __reserved
- C = llvm::ConstantInt::get(IntTy, 0);
- Elts[2] = C;
-
- if (Info.BlockLayout.empty()) {
- // __descriptor
- C = llvm::Constant::getNullValue(PtrToInt8Ty);
- Elts[4] = BuildDescriptorBlockDecl(BE, Info, 0, C, 0);
-
- // Optimize to being a global block.
- Elts[0] = CGM.getNSConcreteGlobalBlock();
-
- Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
-
- C = llvm::ConstantStruct::get(VMContext, Elts, false);
-
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
- llvm::GlobalValue::InternalLinkage, C,
- "__block_holder_tmp_" +
- llvm::Twine(CGM.getGlobalUniqueCount()));
- QualType BPT = BE->getType();
- C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
- return C;
+ const llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, 0, llvmType));
+ }
+
+ // Next, all the block captures.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+
+ if (ci->isByRef()) {
+ // We have to copy/dispose of the __block reference.
+ info.NeedsCopyDispose = true;
+
+ // Just use void* instead of a pointer to the byref type.
+ QualType byRefPtrTy = C.VoidPtrTy;
+
+ const llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ &*ci, llvmType));
+ continue;
+ }
+
+ // Otherwise, build a layout chunk with the size and alignment of
+ // the declaration.
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, variable)) {
+ info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ continue;
}
- std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
- for (int i=0; i<4; ++i)
- Types[i] = Elts[i]->getType();
- Types[4] = PtrToInt8Ty;
-
- for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
- const Expr *E = Info.BlockLayout[i];
- const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
- QualType Ty = E->getType();
- if (BDRE && BDRE->isByRef()) {
- Types[i+BlockFields] =
- llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
- } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
- Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
- } else
- Types[i+BlockFields] = ConvertType(Ty);
+ // Block pointers require copy/dispose.
+ if (variable->getType()->isBlockPointerType()) {
+ info.NeedsCopyDispose = true;
+
+ // So do Objective-C pointers.
+ } else if (variable->getType()->isObjCObjectPointerType() ||
+ C.isObjCNSObjectType(variable->getType())) {
+ info.NeedsCopyDispose = true;
+
+ // So do types that require non-trivial copy construction.
+ } else if (ci->hasCopyExpr()) {
+ info.NeedsCopyDispose = true;
+ info.HasCXXObject = true;
+
+ // And so do types with destructors.
+ } else if (CGM.getLangOptions().CPlusPlus) {
+ if (const CXXRecordDecl *record =
+ variable->getType()->getAsCXXRecordDecl()) {
+ if (!record->hasTrivialDestructor()) {
+ info.HasCXXObject = true;
+ info.NeedsCopyDispose = true;
+ }
+ }
}
- llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
+ CharUnits size = C.getTypeSizeInChars(variable->getType());
+ CharUnits align = C.getDeclAlign(variable);
+ maxFieldAlign = std::max(maxFieldAlign, align);
- llvm::AllocaInst *A = CreateTempAlloca(Ty);
- A->setAlignment(Info.BlockAlign.getQuantity());
- V = A;
+ const llvm::Type *llvmType =
+ CGM.getTypes().ConvertTypeForMem(variable->getType());
+
+ layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
+ }
+
+ // If that was everything, we're done here.
+ if (layout.empty()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
- // Build layout / cleanup information for all the data entries in the
- // layout, and write the enclosing fields into the type.
- std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
- unsigned NumHelpers = 0;
+ // Sort the layout by alignment. We have to use a stable sort here
+ // to get reproducible results. There should probably be an
+ // llvm::array_pod_stable_sort.
+ std::stable_sort(layout.begin(), layout.end());
+
+ CharUnits &blockSize = info.BlockSize;
+ info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);
+
+ // Assuming that the first byte in the header is maximally aligned,
+ // get the alignment of the first byte following the header.
+ CharUnits endAlign = getLowBit(blockSize);
+
+ // If the end of the header isn't satisfactorily aligned for the
+ // maximum thing, look for things that are okay with the header-end
+ // alignment, and keep appending them until we get something that's
+ // aligned right. This algorithm is only guaranteed optimal if
+ // that condition is satisfied at some point; otherwise we can get
+ // things like:
+ // header // next byte has alignment 4
+ // something_with_size_5; // next byte has alignment 1
+ // something_with_alignment_8;
+ // which has 7 bytes of padding, as opposed to the naive solution
+ // which might have less (?).
+ if (endAlign < maxFieldAlign) {
+ llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin() + 1, le = layout.end();
+
+ // Look for something that the header end is already
+ // satisfactorily aligned for.
+ for (; li != le && endAlign < li->Alignment; ++li)
+ ;
+
+ // If we found something that's naturally aligned for the end of
+ // the header, keep adding things...
+ if (li != le) {
+ llvm::SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ for (; li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+
+ // ...until we get to the alignment of the maximum field.
+ if (endAlign >= maxFieldAlign)
+ break;
+ }
- for (unsigned i=0; i<4; ++i)
- Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+ // Don't re-append everything we just appended.
+ layout.erase(first, li);
+ }
+ }
- for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
- const Expr *E = Info.BlockLayout[i];
+ // At this point, we just have to add padding if the end align still
+ // isn't aligned right.
+ if (endAlign < maxFieldAlign) {
+ CharUnits padding = maxFieldAlign - endAlign;
- // Skip padding.
- if (isa<DeclRefExpr>(E)) continue;
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
- llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
- HelperInfo &Note = NoteForHelper[NumHelpers++];
+ endAlign = getLowBit(blockSize);
+ assert(endAlign >= maxFieldAlign);
+ }
- Note.index = i+5;
+ // Slam everything else on now. This works because they have
+ // strictly decreasing alignment and we expect that size is always a
+ // multiple of alignment.
+ for (llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin(), le = layout.end(); li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+ }
- if (isa<CXXThisExpr>(E)) {
- Note.RequiresCopying = false;
- Note.flag = BLOCK_FIELD_IS_OBJECT;
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+}
- Builder.CreateStore(LoadCXXThis(), Addr);
- continue;
- }
+/// Emit a block literal expression in the current function.
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
+ std::string Name = CurFn->getName();
+ CGBlockInfo blockInfo(blockExpr, Name.c_str());
+
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(CGM, blockInfo);
+
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ CurFuncDecl, LocalDeclMap);
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ // If there is nothing to capture, we can emit this as a global block.
+ if (blockInfo.CanBeGlobal)
+ return buildGlobalBlock(CGM, blockInfo, blockFn);
+
+ // Otherwise, we have to emit this as a local block.
+
+ llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
+
+ // Build the block descriptor.
+ llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
+
+ const llvm::Type *intTy = ConvertType(getContext().IntTy);
+
+ llvm::AllocaInst *blockAddr =
+ CreateTempAlloca(blockInfo.StructureType, "block");
+ blockAddr->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Compute the initial on-stack block flags.
+ BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
+ flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(), flags);
+
+ // Initialize the block literal.
+ Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
+ Builder.CreateStore(llvm::ConstantInt::get(intTy, flags.getBitMask()),
+ Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
+ Builder.CreateStore(llvm::ConstantInt::get(intTy, 0),
+ Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
+ Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
+ "block.invoke"));
+ Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockAddr, 4,
+ "block.descriptor"));
+
+ // Finally, capture all the values into the block.
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // First, 'this'.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(blockAddr,
+ blockInfo.CXXThisIndex,
+ "block.captured-this.addr");
+ Builder.CreateStore(LoadCXXThis(), addr);
+ }
- const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
- const ValueDecl *VD = BDRE->getDecl();
- QualType T = VD->getType();
+ // Next, captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- Note.RequiresCopying = BlockRequiresCopying(T);
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
- if (BDRE->isByRef()) {
- Note.flag = BLOCK_FIELD_IS_BYREF;
- if (T.isObjCGCWeak())
- Note.flag |= BLOCK_FIELD_IS_WEAK;
- } else if (T->isBlockPointerType()) {
- Note.flag = BLOCK_FIELD_IS_BLOCK;
- } else {
- Note.flag = BLOCK_FIELD_IS_OBJECT;
- }
+ QualType type = variable->getType();
- if (LocalDeclMap[VD]) {
- if (BDRE->isByRef()) {
- llvm::Value *Loc = LocalDeclMap[VD];
- Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Builder.CreateStore(Loc, Addr);
- continue;
- } else {
- if (BDRE->getCopyConstructorExpr()) {
- E = BDRE->getCopyConstructorExpr();
- PushDestructorCleanup(E->getType(), Addr);
- }
- else {
- E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
- VD->getType().getNonReferenceType(),
- SourceLocation());
- if (VD->getType()->isReferenceType()) {
- E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
- getContext().getPointerType(E->getType()),
- SourceLocation());
- }
- }
- }
- }
+ // This will be a [[type]]*, except that a byref entry will just be
+ // an i8**.
+ llvm::Value *blockField =
+ Builder.CreateStructGEP(blockAddr, capture.getIndex(),
+ "block.captured");
- if (BDRE->isByRef()) {
- E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
- getContext().getPointerType(E->getType()),
- SourceLocation());
- }
+ // Compute the address of the thing we're going to move into the
+ // block literal.
+ llvm::Value *src;
+ if (ci->isNested()) {
+ // We need to use the capture from the enclosing block.
+ const CGBlockInfo::Capture &enclosingCapture =
+ BlockInfo->getCapture(variable);
+
+ // This is a [[type]]*, except that a byref entry will just be an i8**.
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
+ enclosingCapture.getIndex(),
+ "block.capture.addr");
+ } else {
+ // This is a [[type]]*.
+ src = LocalDeclMap[variable];
+ }
- RValue r = EmitAnyExpr(E, Addr, false);
- if (r.isScalar()) {
- llvm::Value *Loc = r.getScalarVal();
- const llvm::Type *Ty = Types[i+BlockFields];
- if (BDRE->isByRef()) {
- // E is now the address of the value field, instead, we want the
- // address of the actual ByRef struct. We optimize this slightly
- // compared to gcc by not grabbing the forwarding slot as this must
- // be done during Block_copy for us, and we can postpone the work
- // until then.
- CharUnits offset = BlockDecls[BDRE->getDecl()];
-
- llvm::Value *BlockLiteral = LoadBlockStruct();
-
- Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
- "block.literal");
- Ty = llvm::PointerType::get(Ty, 0);
- Loc = Builder.CreateBitCast(Loc, Ty);
- Loc = Builder.CreateLoad(Loc);
- // Loc = Builder.CreateBitCast(Loc, Ty);
- }
- Builder.CreateStore(Loc, Addr);
- } else if (r.isComplex())
- // FIXME: implement
- ErrorUnsupported(BE, "complex in block literal");
- else if (r.isAggregate())
- ; // Already created into the destination
+ // For byrefs, we just write the pointer to the byref struct into
+ // the block field. There's no need to chase the forwarding
+ // pointer at this point, since we're building something that will
+ // live a shorter life than the stack byref anyway.
+ if (ci->isByRef()) {
+ // Get a void* that points to the byref struct.
+ if (ci->isNested())
+ src = Builder.CreateLoad(src, "byref.capture");
else
- assert (0 && "bad block variable");
- // FIXME: Ensure that the offset created by the backend for
- // the struct matches the previously computed offset in BlockDecls.
+ src = Builder.CreateBitCast(src, VoidPtrTy);
+
+ // Write that void* into the capture field.
+ Builder.CreateStore(src, blockField);
+
+ // If we have a copy constructor, evaluate that into the block field.
+ } else if (const Expr *copyExpr = ci->getCopyExpr()) {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+
+ // If it's a reference variable, copy the reference into the block field.
+ } else if (type->isReferenceType()) {
+ Builder.CreateStore(Builder.CreateLoad(src, "ref.val"), blockField);
+
+ // Otherwise, fake up a POD copy into the block field.
+ } else {
+ // We use one of these or the other depending on whether the
+ // reference is nested.
+ DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
+ SourceLocation());
+ BlockDeclRefExpr nested(const_cast<VarDecl*>(variable), type,
+ VK_LValue, SourceLocation(), /*byref*/ false);
+
+ Expr *declRef =
+ (ci->isNested() ? static_cast<Expr*>(&nested) : &notNested);
+
+ ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
+ declRef, VK_RValue);
+ EmitAnyExprToMem(&l2r, blockField, /*volatile*/ false, /*init*/ true);
}
- NoteForHelper.resize(NumHelpers);
-
- // __descriptor
- llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE, Info, Ty,
- BlockVarLayout,
- &NoteForHelper);
- Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
- Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
- }
- QualType BPT = BE->getType();
- V = Builder.CreateBitCast(V, ConvertType(BPT));
- // See if this is a __weak block variable and the must call objc_read_weak
- // on it.
- const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
- QualType RES = ftype->getResultType();
- if (RES.isObjCGCWeak()) {
- // Must cast argument to id*
- const llvm::Type *ObjectPtrTy =
- ConvertType(CGM.getContext().getObjCIdType());
- const llvm::Type *PtrObjectPtrTy =
- llvm::PointerType::getUnqual(ObjectPtrTy);
- V = Builder.CreateBitCast(V, PtrObjectPtrTy);
- V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
+ // Push a destructor if necessary. The semantics for when this
+ // actually gets run are really obscure.
+ if (!ci->isByRef() && CGM.getLangOptions().CPlusPlus)
+ PushDestructorCleanup(type, blockField);
}
- return V;
+
+ // Cast to the converted block-pointer type, which happens (somewhat
+ // unfortunately) to be a pointer to function type.
+ llvm::Value *result =
+ Builder.CreateBitCast(blockAddr,
+ ConvertType(blockInfo.getBlockExpr()->getType()));
+
+ return result;
}
-const llvm::Type *BlockModule::getBlockDescriptorType() {
+const llvm::Type *CodeGenModule::getBlockDescriptorType() {
if (BlockDescriptorType)
return BlockDescriptorType;
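To make the layout math in the hunk above concrete, here is a small self-contained sketch of the getLowBit trick and the descending-alignment packing it enables. Plain integers stand in for CharUnits, and the header offset and capture sizes are made up for the example:

    #include <cassert>
    #include <cstdint>

    // Low bit of a nonzero byte count: the alignment of the next byte,
    // assuming byte 0 is universally aligned. lowBit(20) == 4, lowBit(24) == 8.
    static uint64_t lowBit(uint64_t v) { return v & (~v + 1); }

    int main() {
      uint64_t blockSize = 20;          // suppose the header ends at offset 20
      assert(lowBit(blockSize) == 4);   // next byte is only 4-aligned

      // Appending a 4-byte capture first brings the end to a multiple of 8,
      // matching the "keep appending until aligned right" step above.
      blockSize += 4;                   // now 24, so lowBit is 8

      // Captures sorted by descending alignment: once the end offset is
      // aligned for the largest remaining field, every later field lands
      // on a valid boundary with no padding (size being a multiple of
      // alignment for each field).
      uint64_t sizes[]  = {8, 4, 1};
      uint64_t aligns[] = {8, 4, 1};
      for (unsigned i = 0; i != 3; ++i) {
        assert(lowBit(blockSize) >= aligns[i]);
        blockSize += sizes[i];
      }
      return 0;
    }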
@@ -443,18 +678,16 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
getModule().addTypeName("struct.__block_descriptor",
BlockDescriptorType);
+ // Now form a pointer to that.
+ BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
return BlockDescriptorType;
}
-const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
- const llvm::Type *BlockDescPtrTy =
- llvm::PointerType::getUnqual(getBlockDescriptorType());
-
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- getTypes().ConvertType(getContext().IntTy));
+ const llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
// struct __block_literal_generic {
// void *__isa;
@@ -463,11 +696,11 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
- GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
- PtrToInt8Ty,
+ GenericBlockLiteralType = llvm::StructType::get(getLLVMContext(),
+ VoidPtrTy,
IntTy,
IntTy,
- PtrToInt8Ty,
+ VoidPtrTy,
BlockDescPtrTy,
NULL);
@@ -496,10 +729,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
// Get the function pointer from the literal.
llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
- BlockLiteral =
- Builder.CreateBitCast(BlockLiteral,
- llvm::Type::getInt8PtrTy(VMContext),
- "tmp");
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy, "tmp");
// Add the block literal.
QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
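Written out as ordinary C++, the call sequence emitted here through the generic literal looks roughly like this; the struct mirrors the __block_literal_generic layout shown above, and the names and example signature are illustrative:

    struct Block_descriptor;           // opaque for this sketch

    struct Block_literal_generic {
      void *isa;
      int flags;
      int reserved;
      void (*invoke)(void *);          // cast to the real signature per call
      Block_descriptor *descriptor;
    };

    // Invoking `int (^blk)(int)` as `blk(42)` amounts to:
    int callBlock(Block_literal_generic *blk) {
      int (*fn)(void *, int) =
          reinterpret_cast<int (*)(void *, int)>(blk->invoke);  // field 3
      return fn(blk, 42);              // the literal itself is argument 0
    }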
@@ -533,318 +763,222 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
-void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
- assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");
-
- // Figure out what the offset is.
- QualType T = E->getType();
- std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
- CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
+ assert(BlockInfo && "evaluating block ref without block information?");
+ const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
- BlockCXXThisOffset = Offset;
- BlockLayout.push_back(E);
-}
-
-void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
- const ValueDecl *VD = E->getDecl();
- CharUnits &Offset = BlockDecls[VD];
+ // Handle constant captures.
+ if (capture.isConstant()) return LocalDeclMap[variable];
- // See if we have already allocated an offset for this variable.
- if (!Offset.isZero())
- return;
+ llvm::Value *addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ "block.capture.addr");
- // Don't run the expensive check, unless we have to.
- if (!BlockHasCopyDispose)
- if (E->isByRef()
- || BlockRequiresCopying(E->getType()))
- BlockHasCopyDispose = true;
+ if (isByRef) {
+ // addr should be a void** right now. Load, then cast the result
+ // to byref*.
- const ValueDecl *D = cast<ValueDecl>(E->getDecl());
+ addr = Builder.CreateLoad(addr);
+ const llvm::PointerType *byrefPointerType
+ = llvm::PointerType::get(BuildByRefType(variable), 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType,
+ "byref.addr");
- CharUnits Size;
- CharUnits Align;
+ // Follow the forwarding pointer.
+ addr = Builder.CreateStructGEP(addr, 1, "byref.forwarding");
+ addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
- if (E->isByRef()) {
- llvm::tie(Size,Align) =
- getContext().getTypeInfoInChars(getContext().VoidPtrTy);
- } else {
- Size = getContext().getTypeSizeInChars(D->getType());
- Align = getContext().getDeclAlign(D);
+ // Cast back to byref* and GEP over to the actual object.
+ addr = Builder.CreateBitCast(addr, byrefPointerType);
+ addr = Builder.CreateStructGEP(addr, getByRefValueLLVMField(variable),
+ variable->getNameAsString());
}
- Offset = getBlockOffset(Size, Align);
- BlockLayout.push_back(E);
-}
+ if (variable->getType()->isReferenceType())
+ addr = Builder.CreateLoad(addr, "ref.tmp");
-llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
- bool IsByRef) {
-
- CharUnits offset = BlockDecls[VD];
- assert(!offset.isZero() && "getting address of unallocated decl");
-
- llvm::Value *BlockLiteral = LoadBlockStruct();
- llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
- "block.literal");
- if (IsByRef) {
- const llvm::Type *PtrStructTy
- = llvm::PointerType::get(BuildByRefType(VD), 0);
- // The block literal will need a copy/destroy helper.
- BlockHasCopyDispose = true;
-
- const llvm::Type *Ty = PtrStructTy;
- Ty = llvm::PointerType::get(Ty, 0);
- V = Builder.CreateBitCast(V, Ty);
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, 1, "forwarding");
- V = Builder.CreateLoad(V);
- V = Builder.CreateBitCast(V, PtrStructTy);
- V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
- VD->getNameAsString());
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V);
- } else {
- const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
- Ty = llvm::PointerType::get(Ty, 0);
- V = Builder.CreateBitCast(V, Ty);
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V, "ref.tmp");
- }
- return V;
+ return addr;
}
llvm::Constant *
-BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
- // Generate the block descriptor.
- const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- getTypes().ConvertType(getContext().IntTy));
-
- llvm::Constant *DescriptorFields[4];
-
- // Reserved
- DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
-
- // Block literal size. For global blocks we just use the size of the generic
- // block literal struct.
- CharUnits BlockLiteralSize =
- CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
- DescriptorFields[1] =
- llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
-
- // signature. non-optional ObjC-style method descriptor @encode sequence
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
-
- // layout
- DescriptorFields[3] =
- llvm::ConstantInt::get(UnsignedLongTy,0);
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
+ const char *name) {
+ CGBlockInfo blockInfo(blockExpr, name);
- // build the structure from the 4 elements
- llvm::Constant *DescriptorStruct =
- llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(*this, blockInfo);
- llvm::GlobalVariable *Descriptor =
- new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
- llvm::GlobalVariable::InternalLinkage,
- DescriptorStruct, "__block_descriptor_global");
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn;
+ {
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
+ blockInfo,
+ 0, LocalDeclMap);
+ }
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
- int FieldCount = 5;
- // Generate the constants for the block literal.
+ return buildGlobalBlock(*this, blockInfo, blockFn);
+}
- std::vector<llvm::Constant*> LiteralFields(FieldCount);
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn) {
+ assert(blockInfo.CanBeGlobal);
- CGBlockInfo Info(n);
- llvm::Constant *BlockVarLayout;
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
- llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE,
- Info, 0, BlockVarLayout,
- LocalDeclMap);
- assert(Info.BlockSize == BlockLiteralSize
- && "no imports allowed for global block");
+ // Generate the constants for the block literal initializer.
+ llvm::Constant *fields[BlockHeaderSize];
// isa
- LiteralFields[0] = CGM.getNSConcreteGlobalBlock();
+ fields[0] = CGM.getNSConcreteGlobalBlock();
// __flags
- unsigned flags = computeBlockFlag(CGM, BE,
- (BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE));
- LiteralFields[1] =
- llvm::ConstantInt::get(IntTy, flags);
+ BlockFlags flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(),
+ BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);
+ fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
// Reserved
- LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+ fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
// Function
- LiteralFields[3] = Fn;
+ fields[3] = blockFn;
// Descriptor
- LiteralFields[4] = Descriptor;
-
- llvm::Constant *BlockLiteralStruct =
- llvm::ConstantStruct::get(VMContext, LiteralFields, false);
-
- llvm::GlobalVariable *BlockLiteral =
- new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
- llvm::GlobalVariable::InternalLinkage,
- BlockLiteralStruct, "__block_literal_global");
+ fields[4] = buildBlockDescriptor(CGM, blockInfo);
- return BlockLiteral;
-}
+ llvm::Constant *init =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(), fields, BlockHeaderSize,
+ /*packed*/ false);
-llvm::Value *CodeGenFunction::LoadBlockStruct() {
- llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
- "self");
- // For now, we codegen based upon byte offsets.
- return Builder.CreateBitCast(V, PtrToInt8Ty);
+ llvm::GlobalVariable *literal =
+ new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ true,
+ llvm::GlobalVariable::InternalLinkage,
+ init,
+ "__block_literal_global");
+ literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Return a constant of the appropriately-casted type.
+ const llvm::Type *requiredType =
+ CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
+ return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
llvm::Function *
-CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
- CGBlockInfo &Info,
- const Decl *OuterFuncDecl,
- llvm::Constant *& BlockVarLayout,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &blockInfo,
+ const Decl *outerFnDecl,
+ const DeclMapTy &ldm) {
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
- // Check if we should generate debug info for this block.
- if (CGM.getDebugInfo())
- DebugInfo = CGM.getDebugInfo();
+ DebugInfo = CGM.getDebugInfo();
+ BlockInfo = &blockInfo;
// Arrange for local static and local extern declarations to appear
- // to be local to this function as well, as they are directly referenced
- // in a block.
- for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
- i != ldm.end();
- ++i) {
- const VarDecl *VD = dyn_cast<VarDecl>(i->first);
-
- if (VD->getStorageClass() == SC_Static || VD->hasExternalStorage())
- LocalDeclMap[VD] = i->second;
- }
-
- BlockOffset =
- CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
- BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
-
- const FunctionType *BlockFunctionType = BExpr->getFunctionType();
- QualType ResultType;
- FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
- bool IsVariadic;
- if (const FunctionProtoType *FTy =
- dyn_cast<FunctionProtoType>(BlockFunctionType)) {
- ResultType = FTy->getResultType();
- IsVariadic = FTy->isVariadic();
- } else {
- // K&R style block.
- ResultType = BlockFunctionType->getResultType();
- IsVariadic = false;
+ // to be local to this function as well, in case they're directly
+ // referenced in a block.
+ for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
+ const VarDecl *var = dyn_cast<VarDecl>(i->first);
+ if (var && !var->hasLocalStorage())
+ LocalDeclMap[var] = i->second;
}
- FunctionArgList Args;
-
- CurFuncDecl = OuterFuncDecl;
+ // Begin building the function declaration.
- const BlockDecl *BD = BExpr->getBlockDecl();
+ // Build the argument list.
+ FunctionArgList args;
+ // The first argument is the block pointer. Just take it as a void*
+ // and cast it later.
+ QualType selfTy = getContext().VoidPtrTy;
IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
- // Build the block struct now.
- AllocateAllBlockDeclRefs(*this, Info);
+ // FIXME: this leaks, and we only need it very temporarily.
+ ImplicitParamDecl *selfDecl =
+ ImplicitParamDecl::Create(getContext(),
+ const_cast<BlockDecl*>(blockDecl),
+ SourceLocation(), II, selfTy);
+ args.push_back(std::make_pair(selfDecl, selfTy));
+
+ // Now add the rest of the parameters.
+ for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
+ e = blockDecl->param_end(); i != e; ++i)
+ args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ // Create the function declaration.
+ const FunctionProtoType *fnType =
+ cast<FunctionProtoType>(blockInfo.getBlockExpr()->getFunctionType());
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().getFunctionInfo(fnType->getResultType(), args,
+ fnType->getExtInfo());
+ const llvm::FunctionType *fnLLVMType =
+ CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
+
+ MangleBuffer name;
+ CGM.getBlockMangledName(GD, name, blockDecl);
+ llvm::Function *fn =
+ llvm::Function::Create(fnLLVMType, llvm::GlobalValue::InternalLinkage,
+ name.getString(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
+
+ // Begin generating the function.
+ StartFunction(blockDecl, fnType->getResultType(), fn, args,
+ blockInfo.getBlockExpr()->getBody()->getLocEnd());
+ CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
+
+ // Okay. Undo some of what StartFunction did. We really don't need
+ // an alloca for the block address; in theory we could remove it,
+ // but that might do unpleasant things to debug info.
+ llvm::AllocaInst *blockAddrAlloca
+ = cast<llvm::AllocaInst>(LocalDeclMap[selfDecl]);
+ llvm::Value *blockAddr = Builder.CreateLoad(blockAddrAlloca);
+ BlockPointer = Builder.CreateBitCast(blockAddr,
+ blockInfo.StructureType->getPointerTo(),
+ "block");
-  // Capture block layout info here.
- if (CGM.getContext().getLangOptions().ObjC1)
- BlockVarLayout = CGM.getObjCRuntime().GCBlockLayout(*this, Info.DeclRefs);
- else
- BlockVarLayout = llvm::Constant::getNullValue(PtrToInt8Ty);
-
- QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
- BlockLayout);
-
- // FIXME: This leaks
- ImplicitParamDecl *SelfDecl =
- ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
- SourceLocation(), II,
- ParmTy);
-
- Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
- BlockStructDecl = SelfDecl;
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(BlockPointer,
+ blockInfo.CXXThisIndex,
+ "block.captured-this");
+ CXXThisValue = Builder.CreateLoad(addr, "this");
+ }
- for (BlockDecl::param_const_iterator i = BD->param_begin(),
- e = BD->param_end(); i != e; ++i)
- Args.push_back(std::make_pair(*i, (*i)->getType()));
+ // LoadObjCSelf() expects there to be an entry for 'self' in LocalDeclMap;
+ // appease it.
+ if (const ObjCMethodDecl *method
+ = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl)) {
+ const VarDecl *self = method->getSelfDecl();
+
+ // There might not be a capture for 'self', but if there is...
+ if (blockInfo.Captures.count(self)) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(self);
+ llvm::Value *selfAddr = Builder.CreateStructGEP(BlockPointer,
+ capture.getIndex(),
+ "block.captured-self");
+ LocalDeclMap[self] = selfAddr;
+ }
+ }
- const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);
+ // Also force all the constant captures.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (!capture.isConstant()) continue;
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+ unsigned align = getContext().getDeclAlign(variable).getQuantity();
- MangleBuffer Name;
- CGM.getMangledName(GD, Name, BD);
- llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name.getString(), &CGM.getModule());
+ llvm::AllocaInst *alloca =
+ CreateMemTemp(variable->getType(), "block.captured-const");
+ alloca->setAlignment(align);
- CGM.SetInternalFunctionAttributes(BD, Fn, FI);
- StartFunction(BD, ResultType, Fn, Args,
- BExpr->getBody()->getLocEnd());
-
- CurFuncDecl = OuterFuncDecl;
-
- QualType FnType(BlockFunctionType, 0);
- bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
-
- IdentifierInfo *ID = &getContext().Idents.get(Name.getString());
- CurCodeDecl = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), ID, FnType,
- 0,
- SC_Static,
- SC_None,
- false, HasPrototype);
- if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FnType)) {
- const FunctionDecl *CFD = dyn_cast<FunctionDecl>(CurCodeDecl);
- FunctionDecl *FD = const_cast<FunctionDecl *>(CFD);
- llvm::SmallVector<ParmVarDecl*, 16> Params;
- for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
- Params.push_back(ParmVarDecl::Create(getContext(), FD,
- SourceLocation(), 0,
- FT->getArgType(i), /*TInfo=*/0,
- SC_None, SC_None, 0));
- FD->setParams(Params.data(), Params.size());
- }
-
-
- // If we have a C++ 'this' reference, go ahead and force it into
- // existence now.
- if (Info.CXXThisRef) {
- assert(!BlockCXXThisOffset.isZero() &&
- "haven't yet allocated 'this' reference");
-
- // TODO: I have a dream that one day this will be typed.
- llvm::Value *BlockLiteral = LoadBlockStruct();
- llvm::Value *ThisPtrRaw =
- Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
- BlockCXXThisOffset.getQuantity(),
- "this.ptr.raw");
-
- const llvm::Type *Ty =
- CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
- Ty = llvm::PointerType::get(Ty, 0);
- llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");
-
- CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
- }
+ Builder.CreateStore(capture.getConstant(), alloca, align);
- // If we have an Objective C 'self' reference, go ahead and force it
- // into existence now.
- if (Info.NeedsObjCSelf) {
- ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
- LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
+ LocalDeclMap[variable] = alloca;
}
// Save a spot to insert the debug information for all the BlockDeclRefDecls.
@@ -852,7 +986,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
--entry_ptr;
- EmitStmt(BExpr->getBody());
+ EmitStmt(blockDecl->getBody());
// Remember where we were...
llvm::BasicBlock *resume = Builder.GetInsertBlock();
@@ -861,96 +995,78 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
++entry_ptr;
Builder.SetInsertPoint(entry, entry_ptr);
+ // Emit debug information for all the BlockDeclRefDecls.
+ // FIXME: also for 'this'
if (CGDebugInfo *DI = getDebugInfo()) {
- // Emit debug information for all the BlockDeclRefDecls.
- // FIXME: also for 'this'
- for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
- if (const BlockDeclRefExpr *BDRE =
- dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
- const ValueDecl *D = BDRE->getDecl();
- DI->setLocation(D->getLocation());
- DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
- LocalDeclMap[getBlockStructDecl()],
- Builder, this);
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ DI->setLocation(variable->getLocation());
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
}
+
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, blockAddrAlloca,
+ Builder, blockInfo);
}
}
+
// And resume where we left off.
if (resume == 0)
Builder.ClearInsertionPoint();
else
Builder.SetInsertPoint(resume);
- FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+ FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
- // The runtime needs a minimum alignment of a void *.
- CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
- BlockOffset = CharUnits::fromQuantity(
- llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
- MinAlign.getQuantity()));
-
- Info.BlockSize = BlockOffset;
- Info.BlockAlign = BlockAlign;
- Info.BlockLayout = BlockLayout;
- Info.BlockHasCopyDispose = BlockHasCopyDispose;
- return Fn;
+ return fn;
}
-CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
- assert((Align.isPositive()) && "alignment must be 1 byte or more");
-
- CharUnits OldOffset = BlockOffset;
-
- // Ensure proper alignment, even if it means we have to have a gap
- BlockOffset = CharUnits::fromQuantity(
- llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
- BlockAlign = std::max(Align, BlockAlign);
-
- CharUnits Pad = BlockOffset - OldOffset;
- if (Pad.isPositive()) {
- QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
- llvm::APInt(32,
- Pad.getQuantity()),
- ArrayType::Normal, 0);
- ValueDecl *PadDecl = VarDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(),
- 0, QualType(PadTy), 0,
- SC_None, SC_None);
- Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
- SourceLocation());
- BlockLayout.push_back(E);
- }
+/*
+ notes.push_back(HelperInfo());
+ HelperInfo &note = notes.back();
+ note.index = capture.getIndex();
+ note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type));
+ note.cxxbar_import = ci->getCopyExpr();
+
+ if (ci->isByRef()) {
+ note.flag = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+ */
- BlockOffset += Size;
- return BlockOffset - Size;
-}
-llvm::Constant *BlockFunction::
-GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelperp) {
- QualType R = getContext().VoidTy;
- FunctionArgList Args;
+
+
+llvm::Constant *
+CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
// FIXME: This leaks
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Src, Src->getType()));
+ ImplicitParamDecl *dstDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(dstDecl, dstDecl->getType()));
+ ImplicitParamDecl *srcDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
- // FIXME: We'd like to put these into a mergable by content, with
- // internal linkage.
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: it would be nice if these were mergeable with things with
+ // identical semantics.
+ const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -959,80 +1075,89 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_block_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false,
true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
-
- llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
- llvm::Type *PtrPtrT;
-
- if (NoteForHelperp) {
- std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
-
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
- SrcObj = Builder.CreateLoad(SrcObj);
-
- llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
- llvm::Type *PtrPtrT;
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
- DstObj = Builder.CreateLoad(DstObj);
-
- for (unsigned i=0; i < NoteForHelper.size(); ++i) {
- int flag = NoteForHelper[i].flag;
- int index = NoteForHelper[i].index;
-
- if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
- || NoteForHelper[i].RequiresCopying) {
- llvm::Value *Srcv = SrcObj;
- Srcv = Builder.CreateStructGEP(Srcv, index);
- Srcv = Builder.CreateBitCast(Srcv,
- llvm::PointerType::get(PtrToInt8Ty, 0));
- Srcv = Builder.CreateLoad(Srcv);
-
- llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
- Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
-
- llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = CGM.getBlockObjectAssign();
- Builder.CreateCall3(F, Dstv, Srcv, N);
- }
+ StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+
+ const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+
+ llvm::Value *dst = GetAddrOfLocalVar(dstDecl);
+ dst = Builder.CreateLoad(dst);
+ dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ const Expr *copyExpr = ci->getCopyExpr();
+ unsigned flags = 0;
+
+ if (copyExpr) {
+ assert(!ci->isByRef());
+ // don't bother computing flags
+ } else if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ flags = BLOCK_FIELD_IS_BLOCK;
+ } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ }
+
+ if (!copyExpr && !flags) continue;
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+ llvm::Value *dstField = Builder.CreateStructGEP(dst, index);
+
+ // If there's an explicit copy expression, we do that.
+ if (copyExpr) {
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags));
}
}
- CGF.FinishFunction();
+ FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
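
At the source level, the emitted __copy_helper_block_ amounts to one runtime call per non-constant capture. A minimal sketch for a single object-pointer capture — the struct layout and helper name are illustrative, but _Block_object_assign is the real runtime entry point the code above calls:

    extern "C" void _Block_object_assign(void *destAddr, const void *object,
                                         int flags);

    struct Literal {                   // illustrative: header + one capture
      void *isa; int flags_; int reserved;
      void (*invoke)(void *, ...); void *descriptor;
      void *captured;                  // one object-pointer capture
    };

    static void copy_helper(void *d, void *s) {
      Literal *dst = static_cast<Literal *>(d);
      Literal *src = static_cast<Literal *>(s);
      // BLOCK_FIELD_IS_OBJECT (0x03), as computed in the loop above
      _Block_object_assign(&dst->captured, src->captured, 0x03);
    }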
-llvm::Constant *BlockFunction::
-GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
- const llvm::StructType* T,
- std::vector<HelperInfo> *NoteForHelperp) {
- QualType R = getContext().VoidTy;
+llvm::Constant *
+CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
- FunctionArgList Args;
+ FunctionArgList args;
// FIXME: This leaks
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
-
- Args.push_back(std::make_pair(Src, Src->getType()));
+ ImplicitParamDecl *srcDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
   // FIXME: We'd like to make these mergeable by content, with
   // internal linkage.
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1041,59 +1166,76 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
+ FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
-
- if (NoteForHelperp) {
- std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
-
- llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
- llvm::Type *PtrPtrT;
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
- SrcObj = Builder.CreateLoad(SrcObj);
-
- for (unsigned i=0; i < NoteForHelper.size(); ++i) {
- int flag = NoteForHelper[i].flag;
- int index = NoteForHelper[i].index;
-
- if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
- || NoteForHelper[i].RequiresCopying) {
- llvm::Value *Srcv = SrcObj;
- Srcv = Builder.CreateStructGEP(Srcv, index);
- Srcv = Builder.CreateBitCast(Srcv,
- llvm::PointerType::get(PtrToInt8Ty, 0));
- Srcv = Builder.CreateLoad(Srcv);
-
- BuildBlockRelease(Srcv, flag);
- }
+ StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+
+ const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ BlockFieldFlags flags;
+ const CXXDestructorDecl *dtor = 0;
+
+ if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ flags = BLOCK_FIELD_IS_BLOCK;
+ } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ } else if (C.getLangOptions().CPlusPlus) {
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
+ if (!record->hasTrivialDestructor())
+ dtor = record->getDestructor();
}
- }
- CGF.FinishFunction();
+ if (!dtor && flags.empty()) continue;
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
-}
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
-llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelper) {
- return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
- T, NoteForHelper);
-}
+    // If the capture has a non-trivial destructor, push a cleanup to run it.
+ if (dtor) {
+ PushDestructorCleanup(dtor, srcField);
+
+ // Otherwise we call _Block_object_dispose. It wouldn't be too
+ // hard to just emit this as a cleanup if we wanted to make sure
+ // that things were done in reverse.
+ } else {
+ llvm::Value *value = Builder.CreateLoad(srcField);
+ value = Builder.CreateBitCast(value, VoidPtrTy);
+ BuildBlockRelease(value, flags);
+ }
+ }
+
+ cleanups.ForceCleanup();
-llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelperp) {
- return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
- T, NoteForHelperp);
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
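
The destroy helper is the mirror image: C++ captures run their destructor through the cleanup pushed above, and everything else is handed to the runtime. A sketch under the same illustrative layout as the copy-helper sketch:

    extern "C" void _Block_object_dispose(const void *object, int flags);

    struct Literal {
      void *isa; int flags_; int reserved;
      void (*invoke)(void *, ...); void *descriptor;
      void *captured;
    };

    static void destroy_helper(void *s) {
      Literal *src = static_cast<Literal *>(s);
      // BuildBlockRelease emits exactly this call for a plain object capture.
      _Block_object_dispose(src->captured, /*BLOCK_FIELD_IS_OBJECT*/ 0x03);
    }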
-llvm::Constant *BlockFunction::
-GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+llvm::Constant *CodeGenFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, BlockFieldFlags flags,
+ const VarDecl *variable) {
QualType R = getContext().VoidTy;
FunctionArgList Args;
@@ -1121,10 +1263,10 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__Block_byref_id_object_copy_", &CGM.getModule());
+ "__Block_byref_object_copy_", &CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+ = &CGM.getContext().Idents.get("__Block_byref_object_copy_");
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
@@ -1132,37 +1274,43 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+ StartFunction(FD, R, Fn, Args, SourceLocation());
// dst->x
- llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ llvm::Value *V = GetAddrOfLocalVar(Dst);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
- llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+ llvm::Value *DstObj = V;
// src->x
- V = CGF.GetAddrOfLocalVar(Src);
+ V = GetAddrOfLocalVar(Src);
V = Builder.CreateLoad(V);
V = Builder.CreateBitCast(V, T);
V = Builder.CreateStructGEP(V, 6, "x");
- V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
- llvm::Value *SrcObj = Builder.CreateLoad(V);
-
- flag |= BLOCK_BYREF_CALLER;
-
- llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = CGM.getBlockObjectAssign();
- Builder.CreateCall3(F, DstObj, SrcObj, N);
-
- CGF.FinishFunction();
+
+ if (Expr *copyExpr = getContext().getBlockVarCopyInits(variable)) {
+ llvm::Value *SrcObj = V;
+ EmitSynthesizedCXXCopyCtor(DstObj, SrcObj, copyExpr);
+ } else {
+ DstObj = Builder.CreateBitCast(DstObj, VoidPtrTy);
+ V = Builder.CreateBitCast(V, VoidPtrPtrTy);
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+ flags |= BLOCK_BYREF_CALLER;
+ llvm::Value *N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
+ llvm::Value *F = CGM.getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+ }
+
+ FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
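
The magic constant 6 in the StructGEPs above is the field index of the __block variable inside its byref structure when the copy/dispose helper slots are present. A sketch of that layout, following the Blocks ABI (field names illustrative):

    struct BlockByref {
      void *isa;                                  // 0
      BlockByref *forwarding;                     // 1: stack or heap copy
      int flags;                                  // 2
      int size;                                   // 3
      void (*byref_keep)(void *dst, void *src);   // 4: the copy helper
      void (*byref_destroy)(void *);              // 5: the dispose helper
      void *x;                                    // 6: the __block variable
    };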
llvm::Constant *
-BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
- int flag) {
+CodeGenFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ BlockFieldFlags flags,
+ const VarDecl *variable) {
QualType R = getContext().VoidTy;
FunctionArgList Args;
@@ -1184,11 +1332,11 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__Block_byref_id_object_dispose_",
+ "__Block_byref_object_dispose_",
&CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+ = &CGM.getContext().Idents.get("__Block_byref_object_dispose_");
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
@@ -1196,72 +1344,78 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+ StartFunction(FD, R, Fn, Args, SourceLocation());
- llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ llvm::Value *V = GetAddrOfLocalVar(Src);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
- V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
- V = Builder.CreateLoad(V);
- flag |= BLOCK_BYREF_CALLER;
- BuildBlockRelease(V, flag);
- CGF.FinishFunction();
+  // If it's not any kind of special object, it must have a destructor
+  // that needs to be run.
+ if (!flags.isSpecialPointer()) {
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ PushDestructorCleanup(variable->getType(), V);
+ PopCleanupBlocks(CleanupDepth);
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ // Otherwise, call _Block_object_dispose.
+ } else {
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(Int8PtrTy, 0));
+ V = Builder.CreateLoad(V);
+
+ flags |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flags);
+ }
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
-llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
- int Flag, unsigned Align) {
+llvm::Constant *CodeGenModule::BuildbyrefCopyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned align,
+ const VarDecl *var) {
// All alignments below that of pointer alignment collapse down to just
// pointer alignment, as we always have at least that much alignment to begin
// with.
- Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ align /= unsigned(getTarget().getPointerAlign(0) / 8);
// As an optimization, we only generate a single function of each kind we
// might need. We need a different one for each alignment and for each
// setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
- llvm::Constant *&Entry = CGM.AssignCache[Kind];
- if (Entry)
- return Entry;
- return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
+ uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
+ llvm::Constant *&Entry = AssignCache[Kind];
+ if (!Entry)
+ Entry = CodeGenFunction(*this).
+ GeneratebyrefCopyHelperFunction(T, flags, var);
+ return Entry;
}
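
To make the caching concrete: the key packs the pointer-unit alignment and the flag bits into one integer, which works because all flag values stay below BLOCK_BYREF_CURRENT_MAX (256). A worked sketch mirroring the Kind computation above:

    #include <cstdint>

    // 256 is BLOCK_BYREF_CURRENT_MAX from the enum in CGBlocks.h.
    uint64_t byrefHelperKey(unsigned alignInPtrUnits, uint32_t flagBits) {
      return uint64_t(alignInPtrUnits) * 256 + flagBits;
    }
    // e.g. a pointer-aligned __weak __block variable on a 64-bit target:
    // byrefHelperKey(1, 0x08 | 0x10) == 280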
-llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
- int Flag,
- unsigned Align) {
+llvm::Constant *CodeGenModule::BuildbyrefDestroyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned align,
+ const VarDecl *var) {
  // All alignments below that of pointer alignment collapse down to just
// pointer alignment, as we always have at least that much alignment to begin
// with.
- Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ align /= unsigned(getTarget().getPointerAlign(0) / 8);
// As an optimization, we only generate a single function of each kind we
// might need. We need a different one for each alignment and for each
// setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
- llvm::Constant *&Entry = CGM.DestroyCache[Kind];
- if (Entry)
- return Entry;
- return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
+ uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
+ llvm::Constant *&Entry = DestroyCache[Kind];
+ if (!Entry)
+ Entry = CodeGenFunction(*this).
+ GeneratebyrefDestroyHelperFunction(T, flags, var);
+ return Entry;
}
-void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
llvm::Value *F = CGM.getBlockObjectDispose();
llvm::Value *N;
- V = Builder.CreateBitCast(V, PtrToInt8Ty);
- N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
Builder.CreateCall2(F, V, N);
}
-
-ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
-
-BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
- CGBuilderTy &B)
- : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
- PtrToInt8Ty = llvm::PointerType::getUnqual(
- llvm::Type::getInt8Ty(VMContext));
-
- BlockHasCopyDispose = false;
-}
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 743e3c83be33..bee729dccee9 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -24,9 +24,6 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
-#include <vector>
-#include <map>
-
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
@@ -46,157 +43,153 @@ namespace llvm {
namespace clang {
namespace CodeGen {
+
class CodeGenModule;
+class CGBlockInfo;
+
+enum BlockFlag_t {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+};
+class BlockFlags {
+ uint32_t flags;
-class BlockBase {
+ BlockFlags(uint32_t flags) : flags(flags) {}
public:
- enum {
- BLOCK_HAS_COPY_DISPOSE = (1 << 25),
- BLOCK_HAS_CXX_OBJ = (1 << 26),
- BLOCK_IS_GLOBAL = (1 << 28),
- BLOCK_USE_STRET = (1 << 29),
- BLOCK_HAS_SIGNATURE = (1 << 30)
- };
-};
+ BlockFlags() : flags(0) {}
+ BlockFlags(BlockFlag_t flag) : flags(flag) {}
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
-class BlockModule : public BlockBase {
- ASTContext &Context;
- llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
- CodeGenTypes &Types;
- CodeGenModule &CGM;
- llvm::LLVMContext &VMContext;
+ friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
+ return BlockFlags(l.flags | r.flags);
+ }
+ friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFlags l, BlockFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+ return BlockFlags(l) | BlockFlags(r);
+}
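
A short usage sketch of the wrapper, assuming the definitions above: raw enum values can now only be combined through the typed operators, so a bare int can no longer be passed where block flags are expected.

    BlockFlags computeLiteralFlags(bool hasCopyDispose, bool hasSignature) {
      BlockFlags flags;                              // starts empty (0)
      if (hasCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
      if (hasSignature)   flags |= BLOCK_HAS_SIGNATURE;
      return flags;          // getBitMask() yields the literal's flags word
    }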
- ASTContext &getContext() const { return Context; }
- llvm::Module &getModule() const { return TheModule; }
- CodeGenTypes &getTypes() { return Types; }
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
-public:
- int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
- const llvm::Type *getBlockDescriptorType();
+enum BlockFieldFlag_t {
+ BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
- const llvm::Type *getGenericBlockLiteralType();
+ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
+ helpers */
- llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+};
- const llvm::Type *BlockDescriptorType;
- const llvm::Type *GenericBlockLiteralType;
+class BlockFieldFlags {
+ uint32_t flags;
- struct {
- int GlobalUniqueCount;
- } Block;
+ BlockFieldFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFieldFlags() : flags(0) {}
+ BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
- const llvm::PointerType *PtrToInt8Ty;
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
- std::map<uint64_t, llvm::Constant *> AssignCache;
- std::map<uint64_t, llvm::Constant *> DestroyCache;
+ /// Answers whether the flags indicate that this field is an object
+ /// or block pointer that requires _Block_object_assign/dispose.
+ bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
- BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
- CodeGenTypes &T, CodeGenModule &CodeGen)
- : Context(C), TheModule(M), TheTargetData(TD), Types(T),
- CGM(CodeGen), VMContext(M.getContext()),
- BlockDescriptorType(0), GenericBlockLiteralType(0) {
- Block.GlobalUniqueCount = 0;
- PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
+ friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
+ return BlockFieldFlags(l.flags | r.flags);
+ }
+ friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
+ return (l.flags & r.flags);
}
-
- bool BlockRequiresCopying(QualType Ty)
- { return getContext().BlockRequiresCopying(Ty); }
};
+inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
+ return BlockFieldFlags(l) | BlockFieldFlags(r);
+}
-class BlockFunction : public BlockBase {
- CodeGenModule &CGM;
- ASTContext &getContext() const;
-
-protected:
- llvm::LLVMContext &VMContext;
-
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
public:
- CodeGenFunction &CGF;
-
- const llvm::PointerType *PtrToInt8Ty;
- struct HelperInfo {
- int index;
- int flag;
- bool RequiresCopying;
+  /// Name - The name of the block, more or less.
+ const char *Name;
+
+ /// The field index of 'this' within the block, if there is one.
+ unsigned CXXThisIndex;
+
+ class Capture {
+ uintptr_t Data;
+
+ public:
+ bool isIndex() const { return (Data & 1) != 0; }
+ bool isConstant() const { return !isIndex(); }
+ unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+
+ static Capture makeIndex(unsigned index) {
+ Capture v;
+ v.Data = (index << 1) | 1;
+ return v;
+ }
+
+ static Capture makeConstant(llvm::Value *value) {
+ Capture v;
+ v.Data = reinterpret_cast<uintptr_t>(value);
+ return v;
+ }
};
- enum {
- BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
- block, ... */
- BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
- BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
- variable */
- BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
- helpers */
- BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
- support routines */
- BLOCK_BYREF_CURRENT_MAX = 256
- };
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ /// CanBeGlobal - True if the block can be global, i.e. it has
+ /// no non-constant captures.
+ bool CanBeGlobal : 1;
- CGBuilderTy &Builder;
+ /// True if the block needs a custom copy or dispose function.
+ bool NeedsCopyDispose : 1;
- BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+ /// HasCXXObject - True if the block's custom copy/dispose functions
+ /// need to be run even in GC mode.
+ bool HasCXXObject : 1;
- /// BlockOffset - The offset in bytes for the next allocation of an
- /// imported block variable.
- CharUnits BlockOffset;
- /// BlockAlign - Maximal alignment needed for the Block expressed in
- /// characters.
+ const llvm::StructType *StructureType;
+ const BlockExpr *Block;
+ CharUnits BlockSize;
CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ const Capture &getCapture(const VarDecl *var) const {
+ llvm::DenseMap<const VarDecl*, Capture>::const_iterator
+ it = Captures.find(var);
+ assert(it != Captures.end() && "no entry for variable!");
+ return it->second;
+ }
+
+ const BlockDecl *getBlockDecl() const { return Block->getBlockDecl(); }
+ const BlockExpr *getBlockExpr() const { return Block; }
- /// getBlockOffset - Allocate a location within the block's storage
- /// for a value with the given size and alignment requirements.
- CharUnits getBlockOffset(CharUnits Size, CharUnits Align);
-
- /// BlockHasCopyDispose - True iff the block uses copy/dispose.
- bool BlockHasCopyDispose;
-
- /// BlockLayout - The layout of the block's storage, represented as
- /// a sequence of expressions which require such storage. The
- /// expressions can be:
- /// - a BlockDeclRefExpr, indicating that the given declaration
- /// from an enclosing scope is needed by the block;
- /// - a DeclRefExpr, which always wraps an anonymous VarDecl with
- /// array type, used to insert padding into the block; or
- /// - a CXXThisExpr, indicating that the C++ 'this' value should
- /// propagate from the parent to the block.
- /// This is a really silly representation.
- llvm::SmallVector<const Expr *, 8> BlockLayout;
-
- /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
- llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
-
- /// BlockCXXThisOffset - The offset of the C++ 'this' value within
- /// the block structure.
- CharUnits BlockCXXThisOffset;
-
- ImplicitParamDecl *BlockStructDecl;
- ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
-
- llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
- std::vector<HelperInfo> *);
- llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
- std::vector<HelperInfo> *);
-
- llvm::Constant *BuildCopyHelper(const llvm::StructType *,
- std::vector<HelperInfo> *);
- llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
- std::vector<HelperInfo> *);
-
- llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
- llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
-
- llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
- unsigned Align);
- llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
- unsigned Align);
-
- void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
-
- bool BlockRequiresCopying(QualType Ty)
- { return getContext().BlockRequiresCopying(Ty); }
+ CGBlockInfo(const BlockExpr *blockExpr, const char *Name);
};
} // end namespace CodeGen
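
The Capture class above is a tagged word: llvm::Value pointers are at least 2-byte aligned, so the low bit is free to distinguish "field index" from "constant pointer". A standalone model of the same trick:

    #include <cassert>
    #include <cstdint>

    struct TaggedCapture {              // standalone sketch of Capture
      uintptr_t Data;
      static TaggedCapture makeIndex(unsigned i) {
        return TaggedCapture{(uintptr_t(i) << 1) | 1};        // low bit set
      }
      static TaggedCapture makeConstant(void *v) {
        return TaggedCapture{reinterpret_cast<uintptr_t>(v)}; // low bit clear
      }
      bool isIndex() const { return (Data & 1) != 0; }
      unsigned getIndex() const {
        assert(isIndex()); return unsigned(Data >> 1);
      }
      void *getConstant() const {
        assert(!isIndex()); return reinterpret_cast<void *>(Data);
      }
    };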
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 986f621f64f5..5eeb605f18ee 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -30,40 +30,39 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
bool LoadLoad, bool LoadStore,
bool StoreLoad, bool StoreStore,
bool Device) {
- Value *True = llvm::ConstantInt::getTrue(CGF.getLLVMContext());
- Value *False = llvm::ConstantInt::getFalse(CGF.getLLVMContext());
+ Value *True = CGF.Builder.getTrue();
+ Value *False = CGF.Builder.getFalse();
Value *C[5] = { LoadLoad ? True : False,
LoadStore ? True : False,
StoreLoad ? True : False,
- StoreStore ? True : False,
+ StoreStore ? True : False,
Device ? True : False };
CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
C, C + 5);
}
-static Value *EmitCastToInt(CodeGenFunction &CGF,
- const llvm::Type *ToType, Value *Val) {
- if (Val->getType()->isPointerTy()) {
- return CGF.Builder.CreatePtrToInt(Val, ToType);
- }
- assert(Val->getType()->isIntegerTy() &&
- "Used a non-integer and non-pointer type with atomic builtin");
- assert(Val->getType()->getScalarSizeInBits() <=
- ToType->getScalarSizeInBits() && "Integer type too small");
- return CGF.Builder.CreateSExtOrBitCast(Val, ToType);
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, const llvm::IntegerType *IntType) {
+ V = CGF.EmitToMemory(V, T);
+
+ if (V->getType()->isPointerTy())
+ return CGF.Builder.CreatePtrToInt(V, IntType);
+
+ assert(V->getType() == IntType);
+ return V;
}
-static Value *EmitCastFromInt(CodeGenFunction &CGF, QualType ToQualType,
- Value *Val) {
- const llvm::Type *ToType = CGF.ConvertType(ToQualType);
- if (ToType->isPointerTy()) {
- return CGF.Builder.CreateIntToPtr(Val, ToType);
- }
- assert(Val->getType()->isIntegerTy() &&
- "Used a non-integer and non-pointer type with atomic builtin");
- assert(Val->getType()->getScalarSizeInBits() >=
- ToType->getScalarSizeInBits() && "Integer type too small");
- return CGF.Builder.CreateTruncOrBitCast(Val, ToType);
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, const llvm::Type *ResultType) {
+ V = CGF.EmitFromMemory(V, T);
+
+ if (ResultType->isPointerTy())
+ return CGF.Builder.CreateIntToPtr(V, ResultType);
+
+ assert(V->getType() == ResultType);
+ return V;
}
// The atomic builtins are also full memory barriers. This is a utility for
@@ -85,43 +84,69 @@ static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E) {
- const llvm::Type *ValueType =
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
- Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
-
- Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))) };
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- EmitCallWithBarrier(CGF, AtomF, Args,
- Args + 2)));
+ CGF.getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ llvm::Value *Args[2];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
}
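
For illustration, a builtin that exercises this path; at this revision it lowers to the llvm.atomic.* intrinsics bracketed by llvm.memory.barrier calls, which is what EmitCallWithBarrier does:

    // Selects the i32 flavor of the intrinsic; the new pointer-aware code
    // above also accepts pointer-typed operands via EmitToInt/EmitFromInt.
    int fetch_add(int *counter) {
      return __sync_fetch_and_add(counter, 1);   // returns the prior value
    }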
 /// Utility to insert an atomic instruction based on the Intrinsic::ID and
-// the expression node, where the return value is the result of the
-// operation.
+/// the expression node, where the return value is the result of the
+/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
- const llvm::Type *ValueType =
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
- Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
-
- Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))) };
- Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- CGF.Builder.CreateBinOp(Op, Result,
- Args[1])));
+ CGF.getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ llvm::Value *Args[2];
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
}
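
The "Post" variant backs the __sync_op_and_fetch builtins: the intrinsic still returns the old value, so the helper re-applies Op to recover the new one — that is the extra CreateBinOp above. For example:

    int add_and_fetch(int *counter, int v) {
      // Lowered as: old = atomic add; result = old + v
      return __sync_add_and_fetch(counter, v);
    }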
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
@@ -153,10 +178,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Expr::EvalResult Result;
if (E->Evaluate(Result, CGM.getContext())) {
if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(VMContext,
+ return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
- else if (Result.Val.isFloat())
- return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
+ if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
+ Result.Val.getFloat()));
}
switch (BuiltinID) {
@@ -168,7 +194,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
@@ -181,7 +207,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -310,7 +336,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
assert(CI);
uint64_t val = CI->getZExtValue();
- CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
return RValue::get(Builder.CreateCall2(F,
@@ -332,11 +358,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
- if (CatchUndefined && HaveInsertPoint())
+ if (CatchUndefined)
EmitBranch(getTrapBB());
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ else
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("unreachable.cont"));
+
+ return RValue::get(0);
}
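
The fresh "unreachable.cont" block exists because the frontend may still emit code after the builtin, dead or not, and that code needs a live insertion point. A small hypothetical example of the situation:

    int pick(int x) {
      if (x < 0) __builtin_unreachable();  // 'unreachable' is emitted, then
                                           // codegen resumes in
                                           // unreachable.cont
      return x;
    }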
case Builtin::BI__builtin_powi:
@@ -495,18 +525,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
- Address,
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
- SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
return RValue::get(Address);
}
case Builtin::BImemcpy:
@@ -514,11 +539,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
- SizeVal->getType()),
- Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
return RValue::get(Address);
}
@@ -536,24 +557,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
- SizeVal->getType()),
- Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
return RValue::get(Address);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
- Address,
- Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- llvm::Type::getInt8Ty(VMContext)),
- SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
return RValue::get(Address);
}
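
The bzero/memcpy/memmove/memset cases above all move from hand-built five-argument intrinsic calls to the IRBuilder helpers, which fill in the alignment (1) and volatile (false) operands. Source-level usage that reaches this path, for illustration:

    void clear(char *buf, unsigned n) {
      // ByteVal is truncated to i8, then CreateMemSet emits llvm.memset.
      __builtin_memset(buf, 0, n);
    }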
case Builtin::BI__builtin_dwarf_cfa: {
@@ -621,9 +634,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
: Intrinsic::eh_return_i64,
0, 0);
Builder.CreateCall2(F, Int, Ptr);
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("builtin_eh_return.cont"));
+
+ return RValue::get(0);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
@@ -640,11 +656,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
//
// See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
- LLVMContext &C = CGM.getLLVMContext();
-
// Cast the pointer to intptr_t.
Value *Ptr = EmitScalarExpr(E->getArg(0));
- const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
// If that's 64 bits, we're done.
@@ -676,20 +689,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
- Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
- // longjmp doesn't return; mark this as unreachable
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ // longjmp doesn't return; mark this as unreachable.
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("longjmp.cont"));
+
+ return RValue::get(0);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
@@ -788,23 +804,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16: {
- const llvm::Type *ValueType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ QualType T = E->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
IntrinsicTypes, 2);
- Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(2))) };
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- EmitCallWithBarrier(CGF, AtomF, Args,
- Args + 3)));
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(*this, Args[1], T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Result = EmitFromInt(*this, Result, T, ValueType);
+ return RValue::get(Result);
}
case Builtin::BI__sync_bool_compare_and_swap_1:
@@ -812,26 +834,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16: {
- const llvm::Type *ValueType =
- llvm::IntegerType::get(
- CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ QualType T = E->getArg(1)->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
IntrinsicTypes, 2);
- Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(2))) };
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
Value *OldVal = Args[1];
Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
- return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
+ return RValue::get(Result);
}
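
Both compare-and-swap families use the same atomic_cmp_swap intrinsic; only the returned value differs, as the icmp/zext above shows. For illustration:

    long observe(long *p) {
      return __sync_val_compare_and_swap(p, 0L, 1L);   // prior value of *p
    }
    bool try_claim(long *p) {
      return __sync_bool_compare_and_swap(p, 0L, 1L);  // true iff *p was 0
    }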
case Builtin::BI__sync_lock_test_and_set_1:
@@ -935,11 +961,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
Function *F = CGM.getIntrinsic(IntrinsicID);
const llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
- Value *ArgValue = EmitScalarExpr(E->getArg(i));
+ Value *ArgValue;
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ ArgValue = EmitScalarExpr(E->getArg(i));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ }
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
@@ -956,7 +1001,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
QualType BuiltinRetType = E->getType();
- const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
+ const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -997,7 +1042,8 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
-const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
+static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
+ bool q) {
switch (type) {
default: break;
case 0:
@@ -1012,17 +1058,15 @@ const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
return 0;
}
-Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, bool widen) {
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
- if (widen)
- nElts <<= 1;
SmallVector<Constant*, 16> Indices(nElts, C);
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
- const char *name, bool splat,
+ const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
@@ -1032,10 +1076,6 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- if (splat) {
- Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
- Ops.resize(j);
- }
return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
}
@@ -1047,7 +1087,7 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
- return llvm::ConstantVector::get(CV.begin(), CV.size());
+ return llvm::ConstantVector::get(CV);
}
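Several hunks in this patch are one mechanical migration: the (begin, size) form of llvm::ConstantVector::get is replaced by the overload taking the container directly. A minimal sketch of the new form (header paths assumed for LLVM of this vintage):

    #include "llvm/Constants.h"
    #include "llvm/ADT/SmallVector.h"

    static llvm::Constant *MakeSplat(llvm::LLVMContext &Ctx,
                                     unsigned NumElts, uint64_t Val) {
      llvm::Constant *C =
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Val);
      llvm::SmallVector<llvm::Constant*, 16> Elts(NumElts, C);
      return llvm::ConstantVector::get(Elts); // was: get(begin, size)
    }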
/// GetPointeeAlignment - Given an expression with a pointer type, find the
@@ -1099,9 +1139,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Determine the overloaded type of this builtin.
const llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
- Ty = llvm::Type::getFloatTy(VMContext);
+ Ty = llvm::Type::getFloatTy(getLLVMContext());
else
- Ty = llvm::Type::getDoubleTy(VMContext);
+ Ty = llvm::Type::getDoubleTy(getLLVMContext());
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
@@ -1117,9 +1157,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool usgn = type & 0x08;
bool quad = type & 0x10;
bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
- bool splat = false;
+ (void)poly; // Only used in assert()s.
+ bool rightShift = false;
- const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
+ const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
const llvm::Type *Ty = VTy;
if (!Ty)
return 0;
@@ -1127,37 +1168,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
default: return 0;
- case ARM::BI__builtin_neon_vaba_v:
- case ARM::BI__builtin_neon_vabaq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- SmallVector<Value*, 2> Args;
- Args.push_back(Ops[1]);
- Args.push_back(Ops[2]);
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Args, "vaba");
- return Builder.CreateAdd(Ops[0], Ops[1], "vaba");
- }
- case ARM::BI__builtin_neon_vabal_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- SmallVector<Value*, 2> Args;
- Args.push_back(Ops[1]);
- Args.push_back(Ops[2]);
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Args, "vabal");
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- return Builder.CreateAdd(Ops[0], Ops[1], "vabal");
- }
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
- case ARM::BI__builtin_neon_vabdl_v: {
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Ops, "vabdl");
- return Builder.CreateZExt(Ops[0], Ty, "vabdl");
- }
case ARM::BI__builtin_neon_vabs_v:
case ARM::BI__builtin_neon_vabsq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
@@ -1165,51 +1179,28 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vaddhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
Ops, "vaddhn");
- case ARM::BI__builtin_neon_vaddl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateAdd(Ops[0], Ops[1], "vaddl");
- }
- case ARM::BI__builtin_neon_vaddw_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn)
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- else
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- return Builder.CreateAdd(Ops[0], Ops[1], "vaddw");
- }
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcage_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
return EmitNeonCall(F, Ops, "vcage");
}
case ARM::BI__builtin_neon_vcaleq_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcageq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
return EmitNeonCall(F, Ops, "vcage");
}
case ARM::BI__builtin_neon_vcalt_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcagt_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
return EmitNeonCall(F, Ops, "vcagt");
}
case ARM::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcagtq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
return EmitNeonCall(F, Ops, "vcagt");
}
case ARM::BI__builtin_neon_vcls_v:
@@ -1227,11 +1218,20 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
return EmitNeonCall(F, Ops, "vcnt");
}
- // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
+ case ARM::BI__builtin_neon_vcvt_f16_v: {
+ assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_f16: {
+ assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
case ARM::BI__builtin_neon_vcvt_f32_v:
case ARM::BI__builtin_neon_vcvtq_f32_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(VMContext, 4, quad);
+ Ty = GetNeonType(getLLVMContext(), 4, quad);
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
}
@@ -1239,13 +1239,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_u32_v:
case ARM::BI__builtin_neon_vcvtq_s32_v:
case ARM::BI__builtin_neon_vcvtq_u32_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(getLLVMContext(), 4, quad));
return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
+ const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
Function *F = CGM.getIntrinsic(Int, Tys, 2);
return EmitNeonCall(F, Ops, "vcvt_n");
@@ -1254,28 +1254,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
+ const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
Function *F = CGM.getIntrinsic(Int, Tys, 2);
return EmitNeonCall(F, Ops, "vcvt_n");
}
- case ARM::BI__builtin_neon_vdup_lane_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]));
- case ARM::BI__builtin_neon_vdupq_lane_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]), true);
case ARM::BI__builtin_neon_vext_v:
case ARM::BI__builtin_neon_vextq_v: {
- ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
- int CV = C->getSExtValue();
+ int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value *SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
}
case ARM::BI__builtin_neon_vget_lane_i8:
@@ -1386,6 +1379,27 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld2_dup_v:
case ARM::BI__builtin_neon_vld3_dup_v:
case ARM::BI__builtin_neon_vld4_dup_v: {
+    // Handle 64-bit elements as a special case; no "dup" operation is needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+      case ARM::BI__builtin_neon_vld3_dup_v:
+        Int = Intrinsic::arm_neon_vld3;
+        break;
+      case ARM::BI__builtin_neon_vld4_dup_v:
+        Int = Intrinsic::arm_neon_vld4;
+        break;
+ default: assert(0 && "unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
switch (BuiltinID) {
case ARM::BI__builtin_neon_vld2_dup_v:
Int = Intrinsic::arm_neon_vld2lane;
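A user-level view of the new 64-bit special case above (illustrative, assumes <arm_neon.h>): with 64-bit lanes, each vector holds exactly one element, so "load and duplicate" degenerates to a plain structured load and no lane splat is emitted.

    #include <arm_neon.h>

    int64x1x2_t load_pair_dup(const int64_t *p) {
      // One 64-bit lane per vector: the "dup" is the identity, so this
      // lowers to a plain vld2 rather than vld2lane plus a splat.
      return vld2_dup_s64(p);
    }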
@@ -1430,46 +1444,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vminq_v:
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
- case ARM::BI__builtin_neon_vmlal_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
- }
- case ARM::BI__builtin_neon_vmlal_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- if (usgn) {
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- Ops[2] = Builder.CreateZExt(Ops[2], Ty);
- } else {
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- Ops[2] = Builder.CreateSExt(Ops[2], Ty);
- }
- Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
- return Builder.CreateAdd(Ops[0], Ops[1], "vmlal");
- }
- case ARM::BI__builtin_neon_vmlsl_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
- }
- case ARM::BI__builtin_neon_vmlsl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- if (usgn) {
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- Ops[2] = Builder.CreateZExt(Ops[2], Ty);
- } else {
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- Ops[2] = Builder.CreateSExt(Ops[2], Ty);
- }
- Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
- return Builder.CreateSub(Ops[0], Ops[1], "vmlsl");
- }
case ARM::BI__builtin_neon_vmovl_v: {
const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
@@ -1482,38 +1456,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
- case ARM::BI__builtin_neon_vmull_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[1] = EmitNeonSplat(Ops[1], cast<Constant>(Ops[2]));
- }
- case ARM::BI__builtin_neon_vmull_v: {
- if (poly)
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
- Ops, "vmull");
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateMul(Ops[0], Ops[1], "vmull");
- }
+ case ARM::BI__builtin_neon_vmul_v:
+ case ARM::BI__builtin_neon_vmulq_v:
+ assert(poly && "vmul builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
+ Ops, "vmul");
+ case ARM::BI__builtin_neon_vmull_v:
+ assert(poly && "vmull builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
+ Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
- case ARM::BI__builtin_neon_vpadalq_v:
+ case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ const llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ const llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ const llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal");
+ }
case ARM::BI__builtin_neon_vpadd_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
Ops, "vpadd");
case ARM::BI__builtin_neon_vpaddl_v:
- case ARM::BI__builtin_neon_vpaddlq_v:
+ case ARM::BI__builtin_neon_vpaddlq_v: {
Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ const llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ const llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl");
+ }
case ARM::BI__builtin_neon_vpmax_v:
Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
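The "twice as many elements of half the size" computation in the vpadal/vpaddl hunks above mirrors the pairwise-add semantics, which is why the intrinsic is now overloaded on both the result and source vector types. Illustrative caller (assumes <arm_neon.h>):

    #include <arm_neon.h>

    int32x2_t widen_pairs(int16x4_t v) {
      // Adjacent narrow lanes are summed into wider lanes: a <4 x i16>
      // source produces a <2 x i32> result (NarrowTy and Ty above).
      return vpaddl_s16(v);
    }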
@@ -1528,28 +1505,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vqaddq_v:
Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
- case ARM::BI__builtin_neon_vqdmlal_lane_v:
- splat = true;
case ARM::BI__builtin_neon_vqdmlal_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
- Ops, "vqdmlal", splat);
- case ARM::BI__builtin_neon_vqdmlsl_lane_v:
- splat = true;
+ Ops, "vqdmlal");
case ARM::BI__builtin_neon_vqdmlsl_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
- Ops, "vqdmlsl", splat);
- case ARM::BI__builtin_neon_vqdmulh_lane_v:
- case ARM::BI__builtin_neon_vqdmulhq_lane_v:
- splat = true;
+ Ops, "vqdmlsl");
case ARM::BI__builtin_neon_vqdmulh_v:
case ARM::BI__builtin_neon_vqdmulhq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
- Ops, "vqdmulh", splat);
- case ARM::BI__builtin_neon_vqdmull_lane_v:
- splat = true;
+ Ops, "vqdmulh");
case ARM::BI__builtin_neon_vqdmull_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
- Ops, "vqdmull", splat);
+ Ops, "vqdmull");
case ARM::BI__builtin_neon_vqmovn_v:
Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
@@ -1557,26 +1525,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqneg_v:
+ case ARM::BI__builtin_neon_vqnegq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
Ops, "vqneg");
- case ARM::BI__builtin_neon_vqrdmulh_lane_v:
- case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
- splat = true;
case ARM::BI__builtin_neon_vqrdmulh_v:
case ARM::BI__builtin_neon_vqrdmulhq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
- Ops, "vqrdmulh", splat);
+ Ops, "vqrdmulh");
case ARM::BI__builtin_neon_vqrshl_v:
case ARM::BI__builtin_neon_vqrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
- Ops, "vqrshrun_n", false, 1, true);
+ Ops, "vqrshrun_n", 1, true);
case ARM::BI__builtin_neon_vqshl_v:
case ARM::BI__builtin_neon_vqshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
@@ -1584,7 +1550,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vqshl_n_v:
case ARM::BI__builtin_neon_vqshlq_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n",
1, false);
case ARM::BI__builtin_neon_vqshlu_n_v:
case ARM::BI__builtin_neon_vqshluq_n_v:
@@ -1592,11 +1558,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops, "vqshlu", 1, false);
case ARM::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n",
1, true);
case ARM::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
- Ops, "vqshrun_n", false, 1, true);
+ Ops, "vqshrun_n", 1, true);
case ARM::BI__builtin_neon_vqsub_v:
case ARM::BI__builtin_neon_vqsubq_v:
Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
@@ -1622,12 +1588,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
case ARM::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
- Ops, "vrshrn_n", false, 1, true);
+ Ops, "vrshrn_n", 1, true);
case ARM::BI__builtin_neon_vrshr_n_v:
case ARM::BI__builtin_neon_vrshrq_n_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
- 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true);
case ARM::BI__builtin_neon_vrsqrte_v:
case ARM::BI__builtin_neon_vrsqrteq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
@@ -1665,14 +1630,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
case ARM::BI__builtin_neon_vshll_n_v:
Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1);
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
- Ops, "vshrn_n", false, 1, true);
+ Ops, "vshrn_n", 1, true);
case ARM::BI__builtin_neon_vshr_n_v:
case ARM::BI__builtin_neon_vshrq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1683,10 +1648,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
case ARM::BI__builtin_neon_vsri_n_v:
case ARM::BI__builtin_neon_vsriq_n_v:
- poly = true;
+ rightShift = true;
case ARM::BI__builtin_neon_vsli_n_v:
case ARM::BI__builtin_neon_vsliq_n_v:
- Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
Ops, "vsli_n");
case ARM::BI__builtin_neon_vsra_n_v:
@@ -1743,29 +1708,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
Ops, "vsubhn");
- case ARM::BI__builtin_neon_vsubl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateSub(Ops[0], Ops[1], "vsubl");
- }
- case ARM::BI__builtin_neon_vsubw_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn)
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- else
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- return Builder.CreateSub(Ops[0], Ops[1], "vsubw");
- }
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
@@ -1804,7 +1746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1813,7 +1755,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1824,7 +1766,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1832,7 +1774,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1843,7 +1785,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1852,7 +1794,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1861,13 +1803,57 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
}
+llvm::Value *CodeGenFunction::
+BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ std::vector<llvm::Constant*> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return llvm::ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i));
+
+ return Result;
+}
+
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
-
llvm::SmallVector<Value*, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
switch (BuiltinID) {
default: return 0;
@@ -1926,6 +1912,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return Builder.CreateBitCast(BuildVector(Ops),
+ llvm::Type::getX86_MMXTy(getLLVMContext()));
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ return Builder.CreateExtractElement(Ops[0],
+ llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_pslldi:
case X86::BI__builtin_ia32_psllqi:
case X86::BI__builtin_ia32_psllwi:
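The new vec_init cases above connect the MMX init builtins to the x86_mmx IR type via BuildVector, which folds to a ConstantVector when every operand is constant and otherwise emits an insertelement chain. Illustrative caller (assumes clang's <mmintrin.h> of this era, where _mm_set_pi32 expands to __builtin_ia32_vec_init_v2si):

    #include <mmintrin.h>

    __m64 make_pair(int hi, int lo) {
      // BuildVector assembles the two scalars, then the result is
      // bitcast to the opaque x86_mmx type.
      return _mm_set_pi32(hi, lo);
    }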
@@ -1987,7 +1981,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
@@ -1995,7 +1989,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -2037,7 +2031,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
@@ -2068,7 +2062,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
@@ -2112,7 +2106,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
{
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
Ops.pop_back();
@@ -2152,7 +2146,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
{
- Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
Ops.pop_back();
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 179716f631a8..7ffc6e732554 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -1,4 +1,4 @@
-//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,12 @@
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
@@ -60,13 +60,12 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
return true;
}
- // If any fields have a non-trivial destructor, we have to emit it
- // separately.
+ // If any field has a non-trivial destructor, we have to emit the
+ // destructor separately.
for (CXXRecordDecl::field_iterator I = Class->field_begin(),
E = Class->field_end(); I != E; ++I)
- if (const RecordType *RT = (*I)->getType()->getAs<RecordType>())
- if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
- return true;
+ if ((*I)->getType().isDestructedType())
+ return true;
// Try to find a unique base class with a non-trivial destructor.
const CXXRecordDecl *UniqueBase = 0;
@@ -103,7 +102,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
- if (ClassLayout.getBaseClassOffset(UniqueBase) != 0)
+ if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
@@ -180,7 +179,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
// Finally, set up the alias with its proper name and attributes.
- SetCommonAttributes(AliasDecl.getDecl(), Alias);
+ SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
@@ -227,7 +226,8 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
FPT->isVariadic());
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ /*ForVTable=*/false));
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -284,16 +284,15 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ /*ForVTable=*/false));
}
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *This, const llvm::Type *Ty) {
- Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
-
- llvm::Value *VTable = CGF.Builder.CreateBitCast(This, Ty);
- VTable = CGF.Builder.CreateLoad(VTable);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
return CGF.Builder.CreateLoad(VFuncPtr);
@@ -308,164 +307,84 @@ CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
+/// BuildAppleKextVirtualCall - Support gcc's kext ABI, which makes
+/// indirect calls to virtual functions. The call is made by indexing
+/// into the vtable.
llvm::Value *
-CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *&This, const llvm::Type *Ty) {
- DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
- uint64_t VTableIndex =
- CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
-
- return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
-}
-
-/// Implementation for CGCXXABI. Possibly this should be moved into
-/// the incomplete ABI implementations?
-
-CGCXXABI::~CGCXXABI() {}
-
-static void ErrorUnsupportedABI(CodeGenFunction &CGF,
- llvm::StringRef S) {
- Diagnostic &Diags = CGF.CGM.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot yet compile %1 in this ABI");
- Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
- DiagID)
- << S;
-}
-
-static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
- QualType T) {
- return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
-}
-
-const llvm::Type *
-CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
- return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
-}
-
-llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
- llvm::Value *&This,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "calls through member pointers");
-
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
- return llvm::Constant::getNullValue(FTy->getPointerTo());
-}
-
-llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
- llvm::Value *Base,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "loads of member pointers");
- const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
- return llvm::Constant::getNullValue(Ty);
-}
-
-llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
- const CastExpr *E,
- llvm::Value *Src) {
- ErrorUnsupportedABI(CGF, "member function pointer conversions");
- return GetBogusMemberPointer(CGM, E->getType());
+CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ const llvm::Type *Ty) {
+ llvm::Value *VTable = 0;
+ assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+ "BuildAppleKextVirtualCall - bad Qual kind");
+
+ const Type *QTy = Qual->getAsType();
+ QualType T = QualType(QTy, 0);
+ const RecordType *RT = T->getAs<RecordType>();
+ assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
+
+ VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+  assert(VTable && "BuildAppleKextVirtualCall - kext vtable pointer is null");
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+ uint64_t AddressPoint =
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ return Builder.CreateLoad(VFuncPtr);
}
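The ABI quirk being implemented: under gcc's kext ABI (clang's -fapple-kext), even an explicitly qualified call to a virtual function is dispatched indirectly through the named class's vtable rather than called directly. Illustrative source (not from the patch):

    struct Base {
      virtual void f();
    };
    struct Derived : Base {
      void g() {
        // Itanium ABI: direct call to Base::f. Apple kext ABI: indirect
        // call through Base's vtable, built by BuildAppleKextVirtualCall.
        Base::f();
      }
    };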
+/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect
+/// vtable call to a virtual destructor. It returns 0 if it cannot.
llvm::Value *
-CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
- llvm::Value *L,
- llvm::Value *R,
- const MemberPointerType *MPT,
- bool Inequality) {
- ErrorUnsupportedABI(CGF, "member function pointer comparison");
- return CGF.Builder.getFalse();
+CodeGenFunction::BuildAppleKextVirtualDestructorCall(
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD) {
+ llvm::Value * Callee = 0;
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(DD);
+  // FIXME: The Dtor_Base destructor is always called directly; it needs
+  // to be inline-expanded into the caller somehow. -O does that, but we
+  // need to support -O0 as well.
+ if (MD->isVirtual() && Type != Dtor_Base) {
+ // Compute the function type we're calling.
+ const CGFunctionInfo *FInfo =
+ &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty
+ = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+
+ llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+ uint64_t AddressPoint =
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ Callee = Builder.CreateLoad(VFuncPtr);
+ }
+ return Callee;
}
llvm::Value *
-CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "member function pointer null testing");
- return CGF.Builder.getFalse();
-}
-
-llvm::Constant *
-CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
- return GetBogusMemberPointer(CGM, E->getType());
-}
-
-llvm::Constant *
-CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- return GetBogusMemberPointer(CGM, QualType(MPT, 0));
-}
-
-llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
- return GetBogusMemberPointer(CGM,
- CGM.getContext().getMemberPointerType(MD->getType(),
- MD->getParent()->getTypeForDecl()));
-}
-
-llvm::Constant *CGCXXABI::EmitMemberPointer(const FieldDecl *FD) {
- return GetBogusMemberPointer(CGM,
- CGM.getContext().getMemberPointerType(FD->getType(),
- FD->getParent()->getTypeForDecl()));
-}
-
-bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
- // Fake answer.
- return true;
-}
-
-void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
-
- // FIXME: I'm not entirely sure I like using a fake decl just for code
- // generation. Maybe we can come up with a better way?
- ImplicitParamDecl *ThisDecl
- = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
- &CGM.getContext().Idents.get("this"),
- MD->getThisType(CGM.getContext()));
- Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
- getThisDecl(CGF) = ThisDecl;
-}
-
-void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
- /// Initialize the 'this' slot.
- assert(getThisDecl(CGF) && "no 'this' variable for function");
- getThisValue(CGF)
- = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
- "this");
-}
-
-void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
- RValue RV, QualType ResultType) {
- CGF.EmitReturnOfRValue(RV, ResultType);
-}
-
-CharUnits CGCXXABI::GetArrayCookieSize(QualType ElementType) {
- return CharUnits::Zero();
-}
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, const llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
-llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- QualType ElementType) {
- // Should never be called.
- ErrorUnsupportedABI(CGF, "array cookie initialization");
- return 0;
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- ErrorUnsupportedABI(CGF, "array cookie reading");
-
- // This should be enough to avoid assertions.
- NumElements = 0;
- AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
- CookieSize = CharUnits::Zero();
-}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
deleted file mode 100644
index 1e6adb05a0d9..000000000000
--- a/lib/CodeGen/CGCXX.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These classes wrap the information about a call or function
-// definition used to handle ABI compliancy.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_CGCXX_H
-#define CLANG_CODEGEN_CGCXX_H
-
-namespace clang {
-
-/// CXXCtorType - C++ constructor types
-enum CXXCtorType {
- Ctor_Complete, // Complete object ctor
- Ctor_Base, // Base object ctor
- Ctor_CompleteAllocating // Complete object allocating ctor
-};
-
-/// CXXDtorType - C++ destructor types
-enum CXXDtorType {
- Dtor_Deleting, // Deleting dtor
- Dtor_Complete, // Complete object dtor
- Dtor_Base // Base object dtor
-};
-
-} // end namespace clang
-
-#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
new file mode 100644
index 000000000000..8373b660b2f4
--- /dev/null
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -0,0 +1,174 @@
+//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCXXABI::~CGCXXABI() { }
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ llvm::StringRef S) {
+ Diagnostic &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+const llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
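These default implementations let an ABI without member-pointer support still produce well-typed IR while emitting the "cannot yet compile ... in this ABI" diagnostic. The kind of source that reaches them (illustrative):

    struct S { int f(); };

    int call_through(S &s, int (S::*pmf)()) {
      // On an ABI that does not override EmitLoadOfMemberFunctionPointer,
      // this call triggers the diagnostic and yields a bogus null callee.
      return (s.*pmf)();
    }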
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr, QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
+
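For context, the array cookie these hooks manage (a sketch; the exact layout is ABI-specific): for 'new T[n]' where T needs destruction, the element count is stored in front of the array so that 'delete[]' can recover it.

    struct T { ~T() {} };   // user-provided dtor forces a cookie

    T *make(unsigned n) {
      // Allocation = cookie + n * sizeof(T); InitializeArrayCookie
      // writes n and returns the pointer past the cookie.
      return new T[n];
    }

    void destroy(T *p) {
      // ReadArrayCookie recovers n and the original allocation pointer.
      delete [] p;
    }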
+void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ ErrorUnsupportedABI(CGF, "static local variable initialization");
+}
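EmitGuardedInit is the hook the Itanium ABI implements with __cxa_guard_acquire/__cxa_guard_release: first-use initialization of a static that cannot be emitted as a constant. Illustrative source (next_id is a stand-in for any non-constant initializer):

    int next_id();   // declaration only; any non-constant initializer

    int &unique_id() {
      // Not constant-initializable, so the ABI emits a guard variable
      // and runs the initializer exactly once, on first entry.
      static int id = next_id();
      return id;
    }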
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 367e345918a2..de4df3dcbe84 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -32,18 +32,20 @@ namespace clang {
class CXXMethodDecl;
class CXXRecordDecl;
class FieldDecl;
+ class MangleContext;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
- class MangleContext;
/// Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
+ llvm::OwningPtr<MangleContext> MangleCtx;
- CGCXXABI(CodeGenModule &CGM) : CGM(CGM) {}
+ CGCXXABI(CodeGenModule &CGM)
+ : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
@@ -74,7 +76,9 @@ public:
virtual ~CGCXXABI();
/// Gets the mangle context.
- virtual MangleContext &getMangleContext() = 0;
+ MangleContext &getMangleContext() {
+ return *MangleCtx;
+ }
/// Find the LLVM type used to represent the given member pointer
/// type.
@@ -118,7 +122,8 @@ public:
virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
/// Create a member pointer for the given field.
- virtual llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
/// Emit a comparison between two member pointers. Returns an i1.
virtual llvm::Value *
@@ -185,7 +190,7 @@ public:
///
/// \param ElementType - the allocated type of the expression,
/// i.e. the pointee type of the expression result type
- virtual CharUnits GetArrayCookieSize(QualType ElementType);
+ virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
/// Initialize the array cookie for the given allocation.
///
@@ -198,6 +203,7 @@ public:
virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
/// Reads the array cookie associated with the given pointer,
@@ -214,9 +220,21 @@ public:
/// \param CookieSize - an out parameter which will be initialized
/// with the size of the cookie, or zero if there is no cookie
virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ /*************************** Static local guards ****************************/
+
+ /// Emits the guarded initializer and destructor setup for the given
+ /// variable, given that it couldn't be emitted as a constant.
+ ///
+ /// The variable may be:
+ /// - a static local variable
+ /// - a static data member of a class template instantiation
+ virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr);
+
};
/// Creates an instance of a C++ ABI class.
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 475dfa5c102a..ae84b6196dfa 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -131,7 +131,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallVector<CanQualType, 16> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
@@ -170,7 +170,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
assert(isa<FunctionType>(FTy));
if (isa<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+ return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
assert(isa<FunctionProtoType>(FTy));
return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}
@@ -195,13 +195,13 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
-
+
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
return getFunctionInfo(CD, GD.getCtorType());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
return getFunctionInfo(DD, GD.getDtorType());
-
+
return getFunctionInfo(FD);
}
@@ -256,14 +256,14 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Compute ABI information.
getABIInfo().computeInfo(*FI);
-
+
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
ABIArgInfo &RetInfo = FI->getReturnInfo();
if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
-
+
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
@@ -274,7 +274,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// we *just* filled in the FunctionInfo for.
if (!IsRecursive && !PointersToResolve.empty())
HandleLateResolvedPointers();
-
+
return *FI;
}
@@ -288,7 +288,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
NoReturn(_NoReturn), RegParm(_RegParm)
{
NumArgs = NumArgTys;
-
+
// FIXME: Coallocate with the CGFunctionInfo object.
Args = new ArgInfo[1 + NumArgTys];
Args[0].type = ResTy;
@@ -386,20 +386,20 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
if (SrcSTy->getNumElements() == 0) return SrcPtr;
-
+
const llvm::Type *FirstElt = SrcSTy->getElementType(0);
-
+
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
- uint64_t FirstEltSize =
+ uint64_t FirstEltSize =
CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
- if (FirstEltSize < DstSize &&
+ if (FirstEltSize < DstSize &&
FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
return SrcPtr;
-
+
// GEP into the first element.
SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
-
+
// If the first element is a struct, recurse.
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
@@ -417,23 +417,23 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
CodeGenFunction &CGF) {
if (Val->getType() == Ty)
return Val;
-
+
if (isa<llvm::PointerType>(Val->getType())) {
// If this is Pointer->Pointer avoid conversion to and from int.
if (isa<llvm::PointerType>(Ty))
return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
-
+
// Convert the pointer to an integer so we can play with its width.
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
}
-
+
const llvm::Type *DestIntTy = Ty;
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
-
+
if (Val->getType() != DestIntTy)
Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
-
+
if (isa<llvm::PointerType>(Ty))
Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
return Val;
@@ -452,18 +452,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
-
+
// If SrcTy and Ty are the same, just do a load.
if (SrcTy == Ty)
return CGF.Builder.CreateLoad(SrcPtr);
-
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
-
+
if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
-
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
@@ -473,7 +473,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
}
-
+
// If load is legal, just bitcast the src pointer.
if (SrcSize >= DstSize) {
// Generally SrcSize is never greater than DstSize, since this means we are
@@ -489,7 +489,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
Load->setAlignment(1);
return Load;
}
-
+
// Otherwise do coercion through memory. This is stupid, but
// simple.
llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
@@ -518,14 +518,14 @@ static void CreateCoercedStore(llvm::Value *Src,
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
return;
}
-
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
-
+
if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
}
-
+
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
@@ -534,7 +534,7 @@ static void CreateCoercedStore(llvm::Value *Src,
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
return;
}
-
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
@@ -590,7 +590,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
-
+
// For definition purposes, don't consider a K&R function variadic.
bool Variadic = false;
if (const FunctionProtoType *FPT =
@@ -673,7 +673,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
+
if (!VerifyFuncTypeComplete(FPT)) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
@@ -688,7 +688,7 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
- AttributeListType &PAL,
+ AttributeListType &PAL,
unsigned &CallingConv) {
unsigned FuncAttrs = 0;
unsigned RetAttrs = 0;
@@ -755,10 +755,10 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: we need to honor command line settings also.
- // FIXME: RegParm should be reduced in case of nested functions and/or global
- // register variable.
+ // FIXME: RegParm should be reduced in case of global register variable.
signed RegParm = FI.getRegParm();
+ if (!RegParm)
+ RegParm = CodeGenOpts.NumRegisterParameters;
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
@@ -769,7 +769,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
- // sense to do it here because parameters are so fucked up.
+ // sense to do it here because parameters are so messed up.
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerType())
@@ -786,7 +786,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attributes |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
-
+
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
@@ -866,13 +866,32 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
+
if (hasAggregateLLVMType(Ty)) {
- // Do nothing, aggregates and complex variables are accessed by
- // reference.
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested.
+ if (ArgI.getIndirectRealign()) {
+ llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+
+ // Copy from the incoming argument pointer to the temporary with the
+ // appropriate alignment.
+ //
+ // FIXME: We should have a common utility for generating an aggregate
+ // copy.
+ const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits Size = getContext().getTypeSizeInChars(Ty);
+ Builder.CreateMemCpy(Builder.CreateBitCast(AlignedTemp, I8PtrTy),
+ Builder.CreateBitCast(V, I8PtrTy),
+ llvm::ConstantInt::get(IntPtrTy,
+ Size.getQuantity()),
+ ArgI.getIndirectAlign(),
+ false);
+ V = AlignedTemp;
+ }
} else {
// Load scalar value from indirect argument.
- unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
- V = EmitLoadOfScalar(V, false, Alignment, Ty);
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -891,7 +910,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArgI.getDirectOffset() == 0) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
llvm::Value *V = AI;
-
+
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
@@ -905,33 +924,33 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
-
+
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
- unsigned AlignmentToUse =
- CGF.CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ unsigned AlignmentToUse =
+ CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
AlignmentToUse = std::max(AlignmentToUse,
(unsigned)getContext().getDeclAlign(Arg).getQuantity());
-
+
Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
llvm::Value *Ptr = V; // Pointer to store into.
-
+
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgI.getDirectOffset()) {
Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
- Ptr = Builder.CreateBitCast(Ptr,
+ Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
}
-
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
-
+
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
@@ -944,8 +963,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->setName(Arg->getName() + ".coerce");
CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
-
-
+
+
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
@@ -1024,13 +1043,13 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RetAI.getDirectOffset() == 0) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
-
+
// If the instruction right before the insertion point is a store to the
// return value, we can elide the load, zap the store, and usually zap the
// alloca.
llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
+ if (InsertBB->empty() ||
!(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
RV = Builder.CreateLoad(ReturnValue);
@@ -1039,7 +1058,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
SI->eraseFromParent();
-
+
// If that was the only use of the return value, nuke it as well now.
if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
@@ -1052,10 +1071,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
if (unsigned Offs = RetAI.getDirectOffset()) {
V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
V = Builder.CreateConstGEP1_32(V, Offs);
- V = Builder.CreateBitCast(V,
+ V = Builder.CreateBitCast(V,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
-
+
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
@@ -1079,7 +1098,7 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
llvm::Value *Local = GetAddrOfLocalVar(Param);
QualType ArgType = Param->getType();
-
+
// For the most part, we just need to load the alloca, except:
// 1) aggregate r-values are actually pointers to temporaries, and
// 2) references to aggregates are pointers directly to the aggregate.
@@ -1180,7 +1199,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Ignore:
break;
-
+
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
@@ -1204,16 +1223,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
-
+
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgInfo.getDirectOffset()) {
SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
- SrcPtr = Builder.CreateBitCast(SrcPtr,
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
}
-
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
@@ -1233,7 +1252,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
}
-
+
break;
}
@@ -1332,7 +1351,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we are ignoring an argument that had a result, make sure to
// construct the appropriate return value for our caller.
return GetUndefRValue(RetTy);
-
+
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
@@ -1355,25 +1374,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
return RValue::get(CI);
}
-
+
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
-
+
if (!DestPtr) {
DestPtr = CreateMemTemp(RetTy, "coerce");
DestIsVolatile = false;
}
-
+
// If the value is offset in memory, apply the offset now.
llvm::Value *StorePtr = DestPtr;
if (unsigned Offs = RetAI.getDirectOffset()) {
StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
+ StorePtr = Builder.CreateBitCast(StorePtr,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
-
+
unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index bf26799b83f3..8e88beb3cf0d 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
@@ -40,7 +41,7 @@ ComputeNonVirtualBaseClassOffset(ASTContext &Context,
cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
// Add the offset.
- Offset += Layout.getBaseClassOffset(BaseDecl);
+ Offset += Layout.getBaseClassOffsetInBits(BaseDecl);
RD = BaseDecl;
}
@@ -86,9 +87,9 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
uint64_t Offset;
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
if (BaseIsVirtual)
- Offset = Layout.getVBaseClassOffset(Base);
+ Offset = Layout.getVBaseClassOffsetInBits(Base);
else
- Offset = Layout.getBaseClassOffset(Base);
+ Offset = Layout.getBaseClassOffsetInBits(Base);
// Shift and cast down to the base type.
// TODO: for complete types, this should be possible with a GEP.
@@ -179,8 +180,17 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Value *VirtualOffset = 0;
- if (VBase)
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ if (VBase) {
+ if (Derived->hasAttr<FinalAttr>()) {
+ VirtualOffset = 0;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+
+ uint64_t VBaseOffset = Layout.getVBaseClassOffsetInBits(VBase);
+ NonVirtualOffset += VBaseOffset / 8;
+ } else
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ }
// Apply the offsets.
Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
@@ -294,7 +304,8 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
const ASTRecordLayout &Layout =
CGF.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = ForVirtualBase ?
- Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base);
+ Layout.getVBaseClassOffsetInBits(Base) :
+ Layout.getBaseClassOffsetInBits(Base);
SubVTTIndex =
CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
@@ -307,7 +318,7 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
} else {
// We're the complete constructor, so get the VTT by name.
- VTT = CGF.CGM.getVTables().getVTT(RD);
+ VTT = CGF.CGM.getVTables().GetAddrOfVTT(RD);
VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
}
@@ -334,11 +345,34 @@ namespace {
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
}
};
+
+ /// A visitor which checks whether an initializer uses 'this' in a
+ /// way which requires the vtable to be properly set.
+ struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
+ typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
+
+ bool UsesThis;
+
+ DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
+ };
+}
+
+static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
+ DynamicThisUseChecker Checker(C);
+ Checker.Visit(const_cast<Expr*>(Init));
+ return Checker.UsesThis;
}
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXBaseOrMemberInitializer *BaseInit,
+ CXXCtorInitializer *BaseInit,
CXXCtorType CtorType) {
assert(BaseInit->isBaseInitializer() &&
"Must have base initializer!");
@@ -355,6 +389,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
if (CtorType == Ctor_Base && isBaseVirtual)
return;
+ // If the initializer for the base (other than the constructor
+ // itself) accesses 'this' in any way, we need to initialize the
+ // vtables.
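+ // e.g. (illustrative): a mem-initializer such as "Base(f(this))",
+ // where the hypothetical f might call a virtual member through the
+ // pointer, can touch the vtable before the base constructor runs.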
+ if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
+ CGF.InitializeVTablePointers(ClassDecl);
+
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
llvm::Value *V =
@@ -362,9 +402,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
- CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(V, false, /*Lifetime*/ true);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor())
+ if (CGF.CGM.getLangOptions().areExceptionsEnabled() &&
+ !BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
}
@@ -372,7 +415,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
llvm::Value *ArrayIndexVar,
- CXXBaseOrMemberInitializer *MemberInit,
+ CXXCtorInitializer *MemberInit,
QualType T,
unsigned Index) {
if (Index == MemberInit->getNumArrayIndices()) {
@@ -388,11 +431,11 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
CGF.Builder.CreateStore(Next, ArrayIndexVar);
}
+
+ AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.isVolatileQualified(),
+ /*Lifetime*/ true);
- CGF.EmitAggExpr(MemberInit->getInit(), Dest,
- LHS.isVolatileQualified(),
- /*IgnoreResult*/ false,
- /*IsInitializer*/ true);
+ CGF.EmitAggExpr(MemberInit->getInit(), Slot);
return;
}
@@ -476,29 +519,29 @@ namespace {
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXBaseOrMemberInitializer *MemberInit,
+ CXXCtorInitializer *MemberInit,
const CXXConstructorDecl *Constructor,
FunctionArgList &Args) {
- assert(MemberInit->isMemberInitializer() &&
+ assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
// non-static data member initializers.
- FieldDecl *Field = MemberInit->getMember();
+ FieldDecl *Field = MemberInit->getAnyMember();
QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS;
// If we are initializing an anonymous union field, drill down to the field.
- if (MemberInit->getAnonUnionMember()) {
- Field = MemberInit->getAnonUnionMember();
- LHS = CGF.EmitLValueForAnonRecordField(ThisPtr, Field, 0);
- FieldType = Field->getType();
+ if (MemberInit->isIndirectMemberInitializer()) {
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
+ MemberInit->getIndirectMember(), 0);
+ FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
} else {
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- // FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
+ // FIXME: If there's no initializer and the CXXCtorInitializer
// was implicitly generated, we shouldn't be zeroing memory.
RValue RHS;
if (FieldType->isReferenceType()) {
@@ -557,12 +600,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// Emit the block variables for the array indices, if any.
for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
- CGF.EmitLocalBlockVarDecl(*MemberInit->getArrayIndex(I));
+ CGF.EmitAutoVarDecl(*MemberInit->getArrayIndex(I));
}
EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
- if (!CGF.Exceptions)
+ if (!CGF.CGM.getLangOptions().areExceptionsEnabled())
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -674,12 +717,12 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
FunctionArgList &Args) {
const CXXRecordDecl *ClassDecl = CD->getParent();
- llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
+ llvm::SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
B != E; ++B) {
- CXXBaseOrMemberInitializer *Member = (*B);
+ CXXCtorInitializer *Member = (*B);
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
@@ -754,6 +797,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
assert(Dtor->isImplicit() && "bodyless dtor not implicit");
// nothing to do besides what's in the epilogue
}
+ // -fapple-kext must inline any call to this dtor into
+ // the caller's body.
+ if (getContext().getLangOptions().AppleKext)
+ CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
break;
}
@@ -1118,6 +1165,64 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
void
+CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+ EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ return;
+ }
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
+ clang::Ctor_Complete);
+ assert(D->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ D->getThisType(getContext())));
+
+
+ // Push the src ptr.
+ QualType QT = *(FPT->arg_type_begin());
+ const llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ Src = Builder.CreateBitCast(Src, t);
+ Args.push_back(std::make_pair(RValue::get(Src), QT));
+
+ // Skip over first argument (Src).
+ ++ArgBeg;
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
+ E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || FPT->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ QualType ResultType = FPT->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FPT->getExtInfo()),
+ Callee, ReturnValueSlot(), Args, D);
+}
+
+void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args) {
@@ -1164,7 +1269,13 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *This) {
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
- llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+ llvm::Value *Callee = 0;
+ if (getContext().getLangOptions().AppleKext)
+ Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
+ DD->getParent());
+
+ if (!Callee)
+ Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
@@ -1202,13 +1313,7 @@ llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
- llvm::Value *VTablePtr = Builder.CreateBitCast(This,
- Int8PtrTy->getPointerTo());
- VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
-
+ llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
int64_t VBaseOffsetOffset =
CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
@@ -1326,15 +1431,16 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(VTableClass);
- BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = Layout.getVBaseClassOffsetInBits(BaseDecl);
BaseOffsetFromNearestVBase = 0;
BaseDeclIsNonVirtualPrimaryBase = false;
} else {
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffset =
+ Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
BaseOffsetFromNearestVBase =
- OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ OffsetFromNearestVBase + Layout.getBaseClassOffsetInBits(BaseDecl);
BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
}
@@ -1361,3 +1467,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
/*BaseIsNonVirtualPrimaryBase=*/false,
VTable, RD, VBases);
}
+
+llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
+ const llvm::Type *Ty) {
+ llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+ return Builder.CreateLoad(VTablePtrSrc, "vtable");
+}
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
new file mode 100644
index 000000000000..374ede880d20
--- /dev/null
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -0,0 +1,1144 @@
+//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
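+// As a concrete (illustrative) example:
+//
+//   void f(bool b) {
+//     std::string s("x"); // non-trivial dtor => cleanup for this scope
+//     if (b) return;      // normal exit: ~string() runs before return
+//     g();                // if g() throws: ~string() runs on unwind
+//   }
+//
+// The destructor of 's' is a cleanup that must fire on both the normal
+// and the exceptional paths out of the scope.
+//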
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+using namespace clang;
+using namespace CodeGen;
+
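+/// An RValue needs saving if any of the llvm::Values backing it might
+/// not dominate the point where it will be reused (e.g. inside a
+/// cleanup emitted on a different path); such values are spilled to a
+/// temporary alloca by save() and reloaded by restore().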
+bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
+ if (rv.isScalar())
+ return DominatingLLVMValue::needsSaving(rv.getScalarVal());
+ if (rv.isAggregate())
+ return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return true;
+}
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
+ if (rv.isScalar()) {
+ llvm::Value *V = rv.getScalarVal();
+
+ // These automatically dominate and don't need to be saved.
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, ScalarLiteral);
+
+ // Everything else needs an alloca.
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, ScalarAddress);
+ }
+
+ if (rv.isComplex()) {
+ CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
+ const llvm::Type *ComplexTy =
+ llvm::StructType::get(CGF.getLLVMContext(),
+ V.first->getType(), V.second->getType(),
+ (void*) 0);
+ llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
+ return saved_type(addr, ComplexAddress);
+ }
+
+ assert(rv.isAggregate());
+ llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, AggregateLiteral);
+
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, AggregateAddress);
+}
+
+/// Given a saved r-value produced by saved_type::save, perform the code
+/// necessary to restore it to usability at the current insertion
+/// point.
+RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ switch (K) {
+ case ScalarLiteral:
+ return RValue::get(Value);
+ case ScalarAddress:
+ return RValue::get(CGF.Builder.CreateLoad(Value));
+ case AggregateLiteral:
+ return RValue::getAggregate(Value);
+ case AggregateAddress:
+ return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ case ComplexAddress:
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
+ }
+
+ llvm_unreachable("bad saved r-value kind");
+ return RValue();
+}
+
+/// Push an entry of the given size onto this protected-scope stack.
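+///
+/// The buffer grows downward: entries are allocated at StartOfData,
+/// which moves toward StartOfBuffer as scopes are pushed.  When space
+/// runs out, capacity is doubled and the live data is copied to the
+/// end of the new buffer.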
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
+
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when empty");
+
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
+ for (unsigned I = 0; I != NumHandlers; ++I)
+ Scope->getHandlers()[I].Index = getNextEHDestIndex();
+ return Scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope(getNextEHDestIndex());
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
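+/// Set up an active flag for the cleanup just pushed, for use inside a
+/// conditionally evaluated expression.  For instance (illustrative):
+/// in "b && f(Temp())" the temporary's destructor must run only when
+/// the right-hand side was actually evaluated, so the cleanup tests an
+/// i1 flag that is cleared before the conditional and set on the
+/// evaluated path.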
+void CodeGenFunction::initFullExprCleanup() {
+ // Create a variable to decide whether the cleanup needs to be run.
+ llvm::AllocaInst *active
+ = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+
+ // Initialize it to false at a site that's guaranteed to be run
+ // before each evaluation.
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(Builder.getFalse(), active, &block->back());
+
+ // Initialize it to true at the current location.
+ Builder.CreateStore(Builder.getTrue(), active);
+
+ // Set that as the active flag in the cleanup.
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
+ cleanup.setActiveFlag(active);
+
+ if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
+ if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
+}
+
+EHScopeStack::Cleanup::~Cleanup() {
+ llvm_unreachable("Cleanup is indestructible");
+}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+static void ResolveAllBranchFixups(CodeGenFunction &CGF,
+ llvm::SwitchInst *Switch,
+ llvm::BasicBlock *CleanupEntry) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set.
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+
+ // If there isn't an OptimisticBranchBlock, then InitialBranch is
+ // still pointing directly to its destination; forward it to the
+ // appropriate cleanup entry. This is required in the specific
+ // case of
+ // { std::string s; goto lbl; }
+ // lbl:
+ // i.e. where there's an unresolved fixup inside a single cleanup
+ // entry which we're currently popping.
+ if (Fixup.OptimisticBranchBlock == 0) {
+ new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
+ }
+
+ // Don't add this case to the switch statement twice.
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ CGF.EHStack.clearFixups();
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+ // If it doesn't have an optimistic branch block, LatestBranch is
+ // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isEHCleanup());
+ llvm::BasicBlock *Entry = Scope.getEHBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
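+///
+/// Concretely, the entry is folded into its predecessor only when it
+/// has a single predecessor ending in an unconditional branch to it;
+/// otherwise the entry block is returned unchanged.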
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Replace all uses of the entry with the predecessor, in case there
+ // are phis in the cleanup.
+ Entry->replaceAllUsesWith(Pred);
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
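+/// Emit the given cleanup's body at the current insertion point.  EH
+/// cleanups are emitted inside a terminate scope, and if an active
+/// flag is provided the body is guarded by a branch on that flag.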
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ bool ForEH,
+ llvm::Value *ActiveFlag) {
+ // EH cleanups always occur within a terminate scope.
+ if (ForEH) CGF.EHStack.pushTerminate();
+
+ // If there's an active flag, load it and skip the cleanup if it's
+ // false.
+ llvm::BasicBlock *ContBB = 0;
+ if (ActiveFlag) {
+ ContBB = CGF.createBasicBlock("cleanup.done");
+ llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+ llvm::Value *IsActive
+ = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+ CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+ CGF.EmitBlock(CleanupBB);
+ }
+
+ // Ask the cleanup to emit itself.
+ Fn->Emit(CGF, ForEH);
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+ // Emit the continuation block if there was an active flag.
+ if (ActiveFlag)
+ CGF.EmitBlock(ContBB);
+
+ // Leave the terminate scope.
+ if (ForEH) CGF.EHStack.popTerminate();
+}
+
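+/// Redirect a cleanup exit block's terminator, which is always either
+/// an unconditional branch or a switch, from the successor From to the
+/// successor To.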
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+ llvm::BasicBlock *From,
+ llvm::BasicBlock *To) {
+ // Exit is the exit block of a cleanup, so it always terminates in
+ // an unconditional branch or a switch.
+ llvm::TerminatorInst *Term = Exit->getTerminator();
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+ Br->setSuccessor(0, To);
+ } else {
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+ for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+ if (Switch->getSuccessor(I) == From)
+ Switch->setSuccessor(I, To);
+ }
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
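+///
+/// For example (illustrative): in
+///   { std::string s; if (b) return; }
+/// both the early return (recorded as a branch-after) and the ordinary
+/// fallthrough off the end of the block must run ~string(); both are
+/// routed through the cleanup entry and dispatched on the value stored
+/// in the normal cleanup destination slot.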
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Remember activation information.
+ bool IsActive = Scope.isActive();
+ llvm::Value *NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
+ llvm::Value *EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ bool RequiresEHCleanup = Scope.hasEHBranches();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0 && IsActive);
+
+ // Branch-through fall-throughs leave the insertion point set to the
+ // end of the last cleanup, which points to the current scope. The
+ // rest of IR gen doesn't need to worry about this; it only happens
+ // during the execution of PopCleanupBlocks().
+ bool HasPrebranchedFallthrough =
+ (FallthroughSource && FallthroughSource->getTerminator());
+
+ // If this is a normal cleanup, then having a prebranched
+ // fallthrough implies that the fallthrough source unconditionally
+ // jumps here.
+ assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+ (Scope.getNormalBlock() &&
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock()));
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // Even if we don't need the normal cleanup, we might still have
+ // prebranched fallthrough to worry about.
+ if (Scope.isNormalCleanup() && !RequiresNormalCleanup &&
+ HasPrebranchedFallthrough) {
+ assert(!IsActive);
+
+ llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+
+ // If we're branching through this cleanup, just forward the
+ // prebranched fallthrough to the next cleanup, leaving the insert
+ // point in the old block.
+ if (FallthroughIsBranchThrough) {
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ llvm::BasicBlock *EnclosingEntry =
+ CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ NormalEntry, EnclosingEntry);
+ assert(NormalEntry->use_empty() &&
+ "uses of entry remain after forwarding?");
+ delete NormalEntry;
+
+ // Otherwise, we're branching out; just emit the next block.
+ } else {
+ EmitBlock(NormalEntry);
+ SimplifyCleanupEntry(*this, NormalEntry);
+ }
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ // We want to emit the EH cleanup after the normal cleanup, but go
+ // ahead and do the setup for the EH cleanup while the scope is still
+ // alive.
+ llvm::BasicBlock *EHEntry = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
+ if (RequiresEHCleanup) {
+ EHEntry = CreateEHEntry(*this, Scope);
+
+ // Figure out the branch-through dest if necessary.
+ llvm::BasicBlock *EHBranchThroughDest = 0;
+ if (Scope.hasEHBranchThroughs()) {
+ assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
+ EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
+ EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ // If we have exactly one branch-after and no branch-throughs, we
+ // can dispatch it without a switch.
+ if (!Scope.hasEHBranchThroughs() &&
+ Scope.getNumEHBranchAfters() == 1) {
+ assert(!EHBranchThroughDest);
+
+ // TODO: remove the spurious eh.cleanup.dest stores if this edge
+ // never went through any switches.
+ llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
+
+ // Otherwise, if we have any branch-afters, we need a switch.
+ } else if (Scope.getNumEHBranchAfters()) {
+ // The default of the switch belongs to the branch-throughs if
+ // they exist.
+ llvm::BasicBlock *Default =
+ (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
+
+ const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ EHInstsToAppend.push_back(Load);
+ EHInstsToAppend.push_back(Switch);
+
+ for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
+ Switch->addCase(Scope.getEHBranchAfterIndex(I),
+ Scope.getEHBranchAfterBlock(I));
+
+ // Otherwise, we have only branch-throughs; jump to the next EH
+ // cleanup.
+ } else {
+ assert(EHBranchThroughDest);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
+ }
+ }
+
+ if (!RequiresNormalCleanup) {
+ EHStack.popCleanup();
+ } else {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ // Fixups can cause us to optimistically create a normal block,
+ // only to later have no real uses for it. Just delete it in
+ // this case.
+ // TODO: we can potentially simplify all the uses after this.
+ if (Scope.getNormalBlock()) {
+ Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
+ delete Scope.getNormalBlock();
+ }
+
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // I. Set up the fallthrough edge in.
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough) {
+ if (!HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Otherwise, clear the IP if we don't have fallthrough because
+ // the cleanup is inactive. We don't need to save it because
+ // it's still just FallthroughSource.
+ } else if (FallthroughSource) {
+ assert(!IsActive && "source without fallthrough for active cleanup");
+ Builder.ClearInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ EmitBlock(NormalEntry);
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (FallthroughSource && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest || !IsActive);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (FallthroughSource && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ if (HasFallthrough)
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ // If there aren't any enclosing cleanups, we can resolve all
+ // the fixups now.
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(*this, Switch, NormalEntry);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // IV. Pop the cleanup and emit it.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but shouldn't branch to
+ // the cleanup because the cleanup is inactive.
+ if (!HasFallthrough && FallthroughSource) {
+ assert(!IsActive);
+
+ // If we have a prebranched fallthrough, that needs to be
+ // forwarded to the right block.
+ if (HasPrebranchedFallthrough) {
+ llvm::BasicBlock *Next;
+ if (FallthroughIsBranchThrough) {
+ Next = BranchThroughDest;
+ assert(!FallthroughDest);
+ } else {
+ Next = FallthroughDest;
+ }
+
+ ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
+ }
+ Builder.SetInsertPoint(FallthroughSource);
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (HasFallthrough && FallthroughDest) {
+ assert(!FallthroughIsBranchThrough);
+ EmitBlock(FallthroughDest);
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (HasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ Builder.ClearInsertionPoint();
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+ EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
+ EHExit->getInstList().push_back(EHInstsToAppend[I]);
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
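+///
+/// Editor's illustrative sketch (not part of the imported source):
+/// in code such as
+///   while (cond) { X x; if (p) break; }
+/// the 'break' must run x's destructor before leaving the loop, so it
+/// is emitted as a branch threaded through x's cleanup scope; a
+/// forward goto whose destination scope is not yet known is instead
+/// recorded as a branch fixup, as described above.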
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid depths too
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+      // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
+ // We should never get invalid scope depths for an UnwindDest; that
+ // implies that the destination wasn't set up correctly.
+ assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active cleanup.
+ EHScopeStack::stable_iterator
+ InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
+
+ // If the destination is in the same EH cleanup scope as us, we
+ // don't need to thread through anything.
+ if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
+ Builder.ClearInsertionPoint();
+ return;
+ }
+ assert(InnermostCleanup != EHStack.stable_end());
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
+ BI->setSuccessor(0, CreateEHEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ for (EHScopeStack::stable_iterator
+ I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
+ assert(E.strictlyEncloses(I));
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isEHCleanup());
+ I = Scope.getEnclosingEHCleanup();
+
+ // If this is the last cleanup we're propagating through, add this
+ // as a branch-after.
+ if (I == E) {
+ Scope.addEHBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, add it as a branch-through. If this isn't new
+ // information, all the rest of the work has been done before.
+ if (!Scope.addEHBranchThrough(Dest.getBlock()))
+ break;
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed a normal block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup();
+ I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) return true;
+ I = S.getEnclosingNormalCleanup();
+ }
+
+ return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed an EH block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostEHCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getEHBlock()) return true;
+ I = S.getEnclosingEHCleanup();
+ }
+
+ return false;
+}
+
+enum ForActivation_t {
+ ForActivation,
+ ForDeactivation
+};
+
+/// The given cleanup block is changing activation state. Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
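+///
+/// Editor's illustrative sketch (not part of the imported source):
+/// for a conditionally constructed temporary such as
+///   f(cond ? A(1) : A(2));
+/// each arm's destructor cleanup begins inactive and is activated only
+/// on the path that actually constructs its object, so an i1
+/// "cleanup.isactive" flag is allocated and tested before the
+/// destructor runs.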
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+ EHScopeStack::stable_iterator C,
+ ForActivation_t Kind) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+
+ // We always need the flag if we're activating the cleanup, because
+ // we have to assume that the current location doesn't necessarily
+ // dominate all future uses of the cleanup.
+ bool NeedFlag = (Kind == ForActivation);
+
+ // Calculate whether the cleanup was used:
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
+ Scope.setTestFlagInNormalCleanup();
+ NeedFlag = true;
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
+ Scope.setTestFlagInEHCleanup();
+ NeedFlag = true;
+ }
+
+ // If it hasn't yet been used as either, we're done.
+ if (!NeedFlag) return;
+
+ llvm::AllocaInst *Var = Scope.getActiveFlag();
+ if (!Var) {
+ Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(Var);
+
+ // Initialize to true or false depending on whether it was
+ // active up to this point.
+ CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
+}
+
+/// Activate a cleanup that was created in an inactive state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ SetupCleanupBlockActivation(*this, C, ForActivation);
+
+ Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(Scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it.
+ if (C == EHStack.stable_begin()) {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ SetupCleanupBlockActivation(*this, C, ForDeactivation);
+
+ Scope.setActive(false);
+}
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
+ if (!EHCleanupDest)
+ EHCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
+ return EHCleanupDest;
+}
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
new file mode 100644
index 000000000000..c93ec5bb76a9
--- /dev/null
+++ b/lib/CodeGen/CGCleanup.h
@@ -0,0 +1,560 @@
+//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for cleanups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCLEANUP_H
+#define CLANG_CODEGEN_CGCLEANUP_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGCleanup.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
+ unsigned K : 2;
+
+protected:
+ enum { BitsRemaining = 30 };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective-C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
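+///
+/// Editor's illustrative sketch (not part of the imported source):
+///   try { ... } catch (int) { ... } catch (...) { ... }
+/// yields a catch scope with two handlers; the catch-all handler is
+/// stored with a null type-info value (see setCatchAllHandler below).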
+class EHCatchScope : public EHScope {
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ /// The unwind destination index for this handler.
+ unsigned Index;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ bool IsActive : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ bool TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ bool TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 17; // currently 13
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet.
+ llvm::AllocaInst *ActiveFlag;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+  /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+
+ /// The destinations of EH branch-afters and branch-throughs.
+ /// TODO: optimize for the extremely common case of a single
+ /// branch-through.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
+
+ /// EH branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ EHBranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupSize;
+ }
+
+ EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
+ unsigned CleanupSize, unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::Cleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive),
+ TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0)
+ {
+ assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ bool isActive() const { return IsActive; }
+ void setActive(bool A) { IsActive = A; }
+
+ llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
+ void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+
+ void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; }
+ bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; }
+
+ void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; }
+ bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
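+  ///
+  /// Editor's illustrative IR sketch (not part of the imported
+  /// source): with branch-afters present, the cleanup's normal exit
+  /// typically ends in
+  ///   %dest = load i32* %cleanup.dest.slot
+  ///   switch i32 %dest, label %cleanup.cont [ i32 2, label %break.target ]
+  /// with one distinct switch index per branch-after.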
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
+
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ // Same stuff, only for EH branches instead of normal branches.
+ // It's quite possible that we could find a better representation
+ // for this.
+
+ bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
+ void addEHBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.EHBranches.insert(Block))
+ ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ unsigned getNumEHBranchAfters() const {
+ return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].second;
+ }
+
+ bool addEHBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().EHBranches.insert(Block);
+ }
+
+ bool hasEHBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
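+///
+/// Editor's illustrative sketch (not part of the imported source):
+/// given
+///   void f() throw(int, float);
+/// the filter scope holds the type infos for 'int' and 'float'; any
+/// other exception unwinding out of f() reaches std::unexpected().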
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+ unsigned DestIndex : BitsRemaining;
+public:
+ EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ unsigned getDestIndex() const { return DestIndex; }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveEHCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingEHCleanup();
+ }
+ return stable_end();
+}
+
+}
+}
+
+#endif
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 406db886eeed..469b4605d7ca 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -32,13 +33,14 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
using namespace clang;
using namespace clang::CodeGen;
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
- : CGM(CGM), DebugFactory(CGM.getModule()),
+ : CGM(CGM), DBuilder(CGM.getModule()),
BlockLiteralGenericSet(false) {
CreateCompileUnit();
}
@@ -53,28 +55,27 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
}
/// getContextDescriptor - Get context info for the decl.
-llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
- llvm::DIDescriptor &CompileUnit) {
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!Context)
- return CompileUnit;
+ return TheCU;
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
I = RegionMap.find(Context);
if (I != RegionMap.end())
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
- return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
if (!RDecl->isDependentType()) {
- llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- llvm::DIFile(CompileUnit));
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ getOrCreateMainFile());
return llvm::DIDescriptor(Ty);
}
}
- return CompileUnit;
+ return TheCU;
}
/// getFunctionName - Get function name for the given FunctionDecl. If the
@@ -100,9 +101,14 @@ llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
- if (const ObjCImplementationDecl *OID = dyn_cast<const ObjCImplementationDecl>(DC)) {
+ if (const ObjCImplementationDecl *OID =
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
OS << OID->getName();
- } else if (const ObjCCategoryImplDecl *OCD = dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ } else if (const ObjCInterfaceDecl *OID =
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD =
+ dyn_cast<const ObjCCategoryImplDecl>(DC)){
OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
OCD->getIdentifier()->getNameStart() << ')';
}
@@ -131,8 +137,8 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
NumArgs = TST->getNumArgs();
} else {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- Args = TemplateArgs.getFlatArgumentList();
- NumArgs = TemplateArgs.flat_size();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
}
Buffer = RD->getIdentifier()->getNameStart();
PrintingPolicy Policy(CGM.getLangOptions());
@@ -144,18 +150,21 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
memcpy(StrPtr, Buffer.data(), Buffer.length());
return llvm::StringRef(StrPtr, Buffer.length());
-
}
/// getOrCreateFile - Get the file debug info descriptor for the input location.
llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(),
- TheCU);
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid() || llvm::StringRef(PLoc.getFilename()).empty())
+ // If the location is not valid then use main input file.
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+
// Cache the results.
const char *fname = PLoc.getFilename();
llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
@@ -167,21 +176,25 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return llvm::DIFile(cast<llvm::MDNode>(it->second));
}
- llvm::DIFile F = DebugFactory.CreateFile(PLoc.getFilename(),
- getCurrentDirname(), TheCU);
+ llvm::DIFile F = DBuilder.CreateFile(PLoc.getFilename(), getCurrentDirname());
DIFileCache[fname] = F;
return F;
}
+/// getOrCreateMainFile - Get the file info for main compile unit.
+llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+}
+
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
assert (CurLoc.isValid() && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.getLine();
+ return PLoc.isValid()? PLoc.getLine() : 0;
}
/// getColumnNumber - Get column number for the location. If location is
@@ -190,7 +203,7 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
assert (CurLoc.isValid() && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.getColumn();
+ return PLoc.isValid()? PLoc.getColumn() : 0;
}
llvm::StringRef CGDebugInfo::getCurrentDirname() {
@@ -251,16 +264,17 @@ void CGDebugInfo::CreateCompileUnit() {
RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
// Create new compile unit.
- TheCU = DebugFactory.CreateCompileUnit(
+ DBuilder.CreateCompileUnit(
LangTag, Filename, getCurrentDirname(),
- Producer, true,
+ Producer,
LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+ // FIXME - Eliminate TheCU.
+ TheCU = llvm::DICompileUnit(DBuilder.getCU());
}
/// CreateType - Get the Basic type from the cache or create a new
/// one if necessary.
-llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
const char *BTName = NULL;
switch (BT->getKind()) {
@@ -268,10 +282,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_class", Unit, 0, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ return DBuilder.CreateStructType(TheCU, "objc_class",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
@@ -279,31 +293,31 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
// } *id;
llvm::DIType OCTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_class", Unit, 0, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ DBuilder.CreateStructType(TheCU, "objc_class",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType ISATy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- 0, Size, 0, 0, 0, OCTy);
-
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::DIType ISATy = DBuilder.CreatePointerType(OCTy, Size);
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
llvm::DIType FieldTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "isa", Unit,
- 0,Size, 0, 0, 0, ISATy);
+ DBuilder.CreateMemberType("isa", getOrCreateMainFile(),
+ 0,Size, 0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_object", Unit, 0, 0, 0, 0,
- 0,
- llvm::DIType(), Elements);
+ return DBuilder.CreateStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0, Elements);
+ }
+ case BuiltinType::ObjCSel: {
+ return DBuilder.CreateStructType(TheCU, "objc_selector",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -335,17 +349,12 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
- uint64_t Offset = 0;
-
llvm::DIType DbgTy =
- DebugFactory.CreateBasicType(Unit, BTName,
- Unit, 0, Size, Align,
- Offset, /*flags*/ 0, Encoding);
+ DBuilder.CreateBasicType(BTName, Size, Align, Encoding);
return DbgTy;
}
-llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
// Bit size, align and offset of the type.
unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
@@ -353,12 +362,9 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- uint64_t Offset = 0;
-
llvm::DIType DbgTy =
- DebugFactory.CreateBasicType(Unit, "complex",
- Unit, 0, Size, Align,
- Offset, /*flags*/ 0, Encoding);
+ DBuilder.CreateBasicType("complex", Size, Align, Encoding);
+
return DbgTy;
}
@@ -389,13 +395,12 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
return getOrCreateType(QualType(T, 0), Unit);
}
- llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
- llvm::DIType DbgTy =
- DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
- 0, 0, 0, 0, 0, FromTy);
+ llvm::DIType DbgTy = DBuilder.CreateQualifiedType(Tag, FromTy);
+
return DbgTy;
}
@@ -413,24 +418,56 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
Ty->getPointeeType(), Unit);
}
+/// CreatePointeeType - Create pointee type. If the pointee is a record,
+/// emit only its forward declaration when debug-info size reduction is
+/// enabled.
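+/// (Editor's note, an assumption rather than imported text: under size
+/// reduction, e.g. clang's -flimit-debug-info, a member of type
+/// 'struct S *' then needs only a forward declaration of S, not S's
+/// full member list.)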
+llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ return getOrCreateType(PointeeTy, Unit);
+
+ if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
+ RecordDecl *RD = RTy->getDecl();
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
+
+ if (RD->isStruct())
+ return DBuilder.CreateStructType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIArray());
+ else if (RD->isUnion())
+ return DBuilder.CreateUnionType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIArray());
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
+ return DBuilder.CreateClassType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ }
+ }
+ return getOrCreateType(PointeeTy, Unit);
+
+}
+
llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
- llvm::DIType EltTy = getOrCreateType(PointeeTy, Unit);
+ if (Tag == llvm::dwarf::DW_TAG_reference_type)
+ return DBuilder.CreateReferenceType(CreatePointeeType(PointeeTy, Unit));
+
// Bit size, align and offset of the type.
-
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
uint64_t Size =
CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return
- DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
- 0, Size, Align, 0, 0, EltTy);
-
+ return
+ DBuilder.CreatePointerType(CreatePointeeType(PointeeTy, Unit), Size, Align);
}
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
@@ -438,16 +475,11 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
if (BlockLiteralGenericSet)
return BlockLiteralGeneric;
- unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
-
- llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
-
+ llvm::SmallVector<llvm::Value *, 8> EltTys;
llvm::DIType FieldTy;
-
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
-
llvm::DIArray Elements;
llvm::DIType EltTy, DescTy;
@@ -456,23 +488,20 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
- Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTys.clear();
- unsigned Flags = llvm::DIType::FlagAppleBlock;
+ unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
unsigned LineNo = getLineNumber(CurLoc);
- EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
- Unit, LineNo, FieldOffset, 0, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.CreateStructType(Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- LineNo, Size, Align, 0, 0, EltTy);
+ DescTy = DBuilder.CreatePointerType(EltTy, Size);
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
@@ -487,24 +516,20 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__descriptor", Unit,
- LineNo, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.CreateMemberType("__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
- Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
- Unit, LineNo, FieldOffset, 0, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.CreateStructType(Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
BlockLiteralGenericSet = true;
- BlockLiteralGeneric
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
- "", Unit,
- LineNo, Size, Align, 0, 0, EltTy);
+ BlockLiteralGeneric = DBuilder.CreatePointerType(EltTy, Size);
return BlockLiteralGeneric;
}
@@ -513,46 +538,36 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
-
+ if (!Src.Verify())
+ return llvm::DIType();
// We don't set size information, but do specify where the typedef was
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
-
- llvm::DIDescriptor TyContext
- = getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
- Unit);
- llvm::DIType DbgTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef,
- TyContext,
- Ty->getDecl()->getName(), Unit,
- Line, 0, 0, 0, 0, Src);
+ llvm::DIType DbgTy = DBuilder.CreateTypedef(Src, Ty->getDecl()->getName(),
+ Unit, Line);
return DbgTy;
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile Unit) {
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
// Set up remainder of arguments if there is a prototype.
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
- if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ if (isa<FunctionNoProtoType>(Ty))
+ EltTys.push_back(DBuilder.CreateUnspecifiedParameter());
+ else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
- } else {
- // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
}
llvm::DIArray EltTypeArray =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0,
- llvm::DIType(), EltTypeArray);
+ llvm::DIType DbgTy = DBuilder.CreateSubroutineType(Unit, EltTypeArray);
return DbgTy;
}
@@ -560,7 +575,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
/// record fields. This is used while creating debug info entry for a Record.
void CGDebugInfo::
CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys) {
unsigned FieldNo = 0;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (RecordDecl::field_iterator I = RD->field_begin(),
@@ -595,17 +610,13 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
unsigned Flags = 0;
AccessSpecifier Access = I->getAccess();
if (Access == clang::AS_private)
- Flags |= llvm::DIType::FlagPrivate;
+ Flags |= llvm::DIDescriptor::FlagPrivate;
else if (Access == clang::AS_protected)
- Flags |= llvm::DIType::FlagProtected;
-
- // Create a DW_TAG_member node to remember the offset of this field in the
- // struct. FIXME: This is an absolutely insane way to capture this
- // information. When we gut debug info, this should be fixed.
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy);
+ Flags |= llvm::DIDescriptor::FlagProtected;
+
+ FieldTy = DBuilder.CreateMemberType(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
}
@@ -621,19 +632,12 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
0),
Unit);
- unsigned BFlags=0;
- AccessSpecifier Access = Method->getAccess();
- if (Access == clang::AS_private)
- BFlags |= llvm::DIType::FlagPrivate;
- else if (Access == clang::AS_protected)
- BFlags |= llvm::DIType::FlagProtected;
-
// Add "this" pointer.
llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
- llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
+ llvm::SmallVector<llvm::Value *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
@@ -645,7 +649,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
QualType ThisPtr =
Context.getPointerType(Context.getTagDeclType(Method->getParent()));
llvm::DIType ThisPtrType =
- DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+ DBuilder.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
@@ -656,15 +660,22 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Elts.push_back(Args.getElement(i));
llvm::DIArray EltTypeArray =
- DebugFactory.GetOrCreateArray(Elts.data(), Elts.size());
+ DBuilder.GetOrCreateArray(Elts.data(), Elts.size());
- return
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0,
- llvm::DIType(), EltTypeArray);
+ return DBuilder.CreateSubroutineType(Unit, EltTypeArray);
}
+/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
+/// inside a function.
+static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
+ if (const CXXRecordDecl *NRD =
+ dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ return isFunctionLocalClass(NRD);
+ else if (isa<FunctionDecl>(RD->getDeclContext()))
+ return true;
+ return false;
+}
/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
/// a single member function GlobalDecl.
llvm::DISubprogram
@@ -680,7 +691,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
llvm::StringRef MethodLinkageName;
- if (!IsCtorOrDtor)
+ if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
MethodLinkageName = CGM.getMangledName(Method);
// Get the location for the method.
@@ -705,15 +716,32 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
ContainingType = RecordTy;
}
+ unsigned Flags = 0;
+ if (Method->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIDescriptor::FlagProtected;
+ if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ } else if (const CXXConversionDecl *CXXC =
+ dyn_cast<CXXConversionDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ }
+ if (Method->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+
llvm::DISubprogram SP =
- DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName,
- MethodLinkageName,
- MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false,
- /* isDefintion=*/ false,
- Virtuality, VIndex, ContainingType,
- Method->isImplicit(),
- CGM.getLangOptions().Optimize);
+ DBuilder.CreateMethod(RecordTy , MethodName, MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/ false,
+ Virtuality, VIndex, ContainingType,
+ Flags, CGM.getLangOptions().Optimize);
// Don't cache ctors or dtors since we have to emit multiple functions for
// a single ctor or dtor.
@@ -728,7 +756,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
for(CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
@@ -746,26 +774,15 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
BE = RD->friend_end(); BI != BE; ++BI) {
-
- TypeSourceInfo *TInfo = (*BI)->getFriendType();
- if(TInfo)
- {
- llvm::DIType Ty = getOrCreateType(TInfo->getType(), Unit);
-
- llvm::DIType DTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_friend,
- RecordTy, llvm::StringRef(),
- Unit, 0, 0, 0,
- 0, 0, Ty);
-
- EltTys.push_back(DTy);
- }
-
+ if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
+ EltTys.push_back(DBuilder.CreateFriend(RecordTy,
+ getOrCreateType(TInfo->getType(),
+ Unit)));
}
}
@@ -774,7 +791,7 @@ CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
@@ -790,23 +807,20 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
// virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
- BFlags = llvm::DIType::FlagVirtual;
+ BFlags = llvm::DIDescriptor::FlagVirtual;
} else
- BaseOffset = RL.getBaseClassOffset(Base);
+ BaseOffset = RL.getBaseClassOffsetInBits(Base);
AccessSpecifier Access = BI->getAccessSpecifier();
if (Access == clang::AS_private)
- BFlags |= llvm::DIType::FlagPrivate;
+ BFlags |= llvm::DIDescriptor::FlagPrivate;
else if (Access == clang::AS_protected)
- BFlags |= llvm::DIType::FlagProtected;
+ BFlags |= llvm::DIDescriptor::FlagProtected;
- llvm::DIType DTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
- RecordTy, llvm::StringRef(),
- Unit, 0, 0, 0,
- BaseOffset, BFlags,
- getOrCreateType(BI->getType(),
- Unit));
+ llvm::DIType DTy =
+ DBuilder.CreateInheritance(RecordTy,
+ getOrCreateType(BI->getType(), Unit),
+ BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
@@ -819,23 +833,13 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
ASTContext &Context = CGM.getContext();
/* Function type */
- llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
- llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
- llvm::DIType SubTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0, llvm::DIType(), SElements);
-
+ llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DBuilder.GetOrCreateArray(&STy, 1);
+ llvm::DIType SubTy = DBuilder.CreateSubroutineType(Unit, SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
- llvm::DIType vtbl_ptr_type
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "__vtbl_ptr_type", Unit,
- 0, Size, 0, 0, 0, SubTy);
-
- VTablePtrType =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- 0, Size, 0, 0, 0, vtbl_ptr_type);
+ llvm::DIType vtbl_ptr_type = DBuilder.CreatePointerType(SubTy, Size, 0,
+ "__vtbl_ptr_type");
+ VTablePtrType = DBuilder.CreatePointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -855,7 +859,7 @@ llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
/// debug info entry in EltTys vector.
void CGDebugInfo::
CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
// If there is a primary base then it will hold vtable info.
@@ -868,27 +872,24 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- getVTableName(RD), Unit,
- 0, Size, 0, 0, 0,
- getOrCreateVTablePtrType(Unit));
+ = DBuilder.CreateMemberType(getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
}
+/// getOrCreateRecordType - Emit record type's standalone debug info.
+llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ DBuilder.RetainType(T);
+ return T;
+}
+
/// CreateType - get structure or union type.
-llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
-
- unsigned Tag;
- if (RD->isStruct())
- Tag = llvm::dwarf::DW_TAG_structure_type;
- else if (RD->isUnion())
- Tag = llvm::dwarf::DW_TAG_union_type;
- else {
- assert(RD->isClass() && "Unknown RecordType!");
- Tag = llvm::dwarf::DW_TAG_class_type;
- }
+ llvm::DIFile Unit = getOrCreateFile(RD->getLocation());
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
@@ -901,21 +902,21 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
llvm::DIDescriptor FDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
// If this is just a forward declaration, construct an appropriately
// marked node and just return it.
if (!RD->getDefinition()) {
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(),
- DefUnit, Line, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ llvm::DIType FwdDecl =
+ DBuilder.CreateStructType(FDContext, RD->getName(),
+ DefUnit, Line, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
return FwdDecl;
}
- llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+ llvm::DIType FwdDecl = DBuilder.CreateTemporaryType(DefUnit);
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -927,7 +928,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
@@ -951,36 +952,41 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
// Do not use DIGlobalVariable for enums.
if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
- DebugFactory.CreateGlobalVariable(FwdDecl, VName, VName, VName, VUnit,
- getLineNumber(V->getLocation()),
- VTy, true, true, CI);
+ DBuilder.CreateStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
}
}
}
}
CollectRecordFields(RD, Unit, EltTys);
- llvm::MDNode *ContainingType = NULL;
+ llvm::SmallVector<llvm::Value *, 16> TemplateParams;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
-
- // A class's primary base or the class itself contains the vtable.
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
- ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
- else if (CXXDecl->isDynamicClass())
- ContainingType = FwdDecl;
+ if (ClassTemplateSpecializationDecl *TSpecial
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ const TemplateArgumentList &TAL = TSpecial->getTemplateArgs();
+ for (unsigned i = 0, e = TAL.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAL[i];
+ if (TA.getKind() == TemplateArgument::Type) {
+ llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
+ llvm::DITemplateTypeParameter TTP =
+ DBuilder.CreateTemplateTypeParameter(TheCU, TTy.getName(), TTy);
+ TemplateParams.push_back(TTP);
+ } else if (TA.getKind() == TemplateArgument::Integral) {
+ llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ // FIXME: Get the parameter name instead of the parameter type name.
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.CreateTemplateValueParameter(TheCU, TTy.getName(), TTy,
+ TA.getAsIntegral()->getZExtValue());
+ TemplateParams.push_back(TVP);
+ }
+ }
+ }
}
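
For illustration, a specialization like the one below (example source, not from this patch) exercises both branches of the loop above: the type argument is recorded through CreateTemplateTypeParameter and the integral argument through CreateTemplateValueParameter.

    template <typename T, unsigned N> struct Buf { T data[N]; };
    Buf<float, 4> b; // type parameter T = float, value parameter N = 4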
- llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
-
- // Bit size, align and offset of the type.
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
-
RegionStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
RegionMap.find(Ty->getDecl());
@@ -988,25 +994,54 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
RegionMap.erase(RI);
llvm::DIDescriptor RDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
-
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
llvm::StringRef RDName = RD->getName();
- // If this is a class, include the template arguments also.
- if (Tag == llvm::dwarf::DW_TAG_class_type)
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ llvm::DIArray Elements =
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::MDNode *RealDecl = NULL;
+
+ if (RD->isStruct())
+ RealDecl = DBuilder.CreateStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, Elements);
+ else if (RD->isUnion())
+ RealDecl = DBuilder.CreateUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, Elements);
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
RDName = getClassName(RD);
-
- llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(Tag, RDContext,
- RDName,
- DefUnit, Line, Size, Align, 0, 0,
- llvm::DIType(), Elements,
- 0, ContainingType);
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Walk up to the root of the non-virtual primary base chain.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = FwdDecl;
+ llvm::DIArray TParamsArray =
+ DBuilder.GetOrCreateArray(TemplateParams.data(), TemplateParams.size());
+ RealDecl = DBuilder.CreateClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ Elements, ContainingType,
+ TParamsArray);
+ }
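
The loop above walks to the root of the non-virtual primary base chain before recording the vtable's containing type. For an illustrative hierarchy:

    struct A { virtual ~A(); };
    struct B : A { };
    struct C : B { }; // ContainingType for C resolves to A, the root
                      // of its non-virtual primary base chain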
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
RegionMap[RD] = llvm::WeakVH(RealDecl);
- return RealDecl;
+ return llvm::DIType(RealDecl);
}
/// CreateType - get objective-c object type.
@@ -1020,7 +1055,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIFile Unit) {
ObjCInterfaceDecl *ID = Ty->getDecl();
- unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+ if (!ID)
+ return llvm::DIType();
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
@@ -1030,11 +1066,10 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// If this is just a forward declaration, return a special forward-declaration
// debug type.
if (ID->isForwardDecl()) {
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
- DefUnit, Line, 0, 0, 0, 0,
- llvm::DIType(), llvm::DIArray(),
- RuntimeLang);
+ llvm::DIType FwdDecl =
+ DBuilder.CreateStructType(Unit, ID->getName(),
+ DefUnit, Line, 0, 0, 0,
+ llvm::DIArray(), RuntimeLang);
return FwdDecl;
}
@@ -1044,7 +1079,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+ llvm::DIType FwdDecl = DBuilder.CreateTemporaryType(DefUnit);
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -1056,27 +1091,29 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
ObjCInterfaceDecl *SClass = ID->getSuperClass();
if (SClass) {
llvm::DIType SClassTy =
getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ if (!SClassTy.isValid())
+ return llvm::DIType();
+
llvm::DIType InhTag =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
- Unit, "", Unit, 0, 0, 0,
- 0 /* offset */, 0, SClassTy);
+ DBuilder.CreateInheritance(FwdDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
unsigned FieldNo = 0;
- for (ObjCInterfaceDecl::ivar_iterator I = ID->ivar_begin(),
- E = ID->ivar_end(); I != E; ++I, ++FieldNo) {
- ObjCIvarDecl *Field = *I;
+ for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
+ Field = Field->getNextIvar(), ++FieldNo) {
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
-
+ if (!FieldTy.isValid())
+ return llvm::DIType();
+
llvm::StringRef FieldName = Field->getName();
// Ignore unnamed fields.
@@ -1105,22 +1142,18 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
- Flags = llvm::DIType::FlagProtected;
+ Flags = llvm::DIDescriptor::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
- Flags = llvm::DIType::FlagPrivate;
-
- // Create a DW_TAG_member node to remember the offset of this field in the
- // struct. FIXME: This is an absolutely insane way to capture this
- // information. When we gut debug info, this should be fixed.
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy);
+ Flags = llvm::DIDescriptor::FlagPrivate;
+
+ FieldTy = DBuilder.CreateMemberType(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
RegionStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
@@ -1132,10 +1165,10 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(), DefUnit,
- Line, Size, Align, 0, 0, llvm::DIType(),
- Elements, RuntimeLang);
+ llvm::DIType RealDecl =
+ DBuilder.CreateStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, 0,
+ Elements, RuntimeLang);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
@@ -1145,18 +1178,11 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return RealDecl;
}
-llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
- llvm::DIFile Unit) {
- return CreateEnumType(Ty->getDecl(), Unit);
-
-}
-
-llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty) {
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return CreateType(RT, Unit);
+ return CreateType(RT);
else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return CreateType(ET, Unit);
+ return CreateEnumType(ET->getDecl());
return llvm::DIType();
}
@@ -1168,17 +1194,14 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
if (NumElems > 0)
--NumElems;
- llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems);
- llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1);
+ llvm::Value *Subscript = DBuilder.GetOrCreateSubrange(0, NumElems);
+ llvm::DIArray SubscriptArray = DBuilder.GetOrCreateArray(&Subscript, 1);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
return
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
- Unit, "", Unit,
- 0, Size, Align, 0, 0,
- ElementTy, SubscriptArray);
+ DBuilder.CreateVectorType(Size, Align, ElementTy, SubscriptArray);
}
llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
@@ -1204,27 +1227,28 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
- llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ llvm::SmallVector<llvm::Value *, 8> Subscripts;
QualType EltTy(Ty, 0);
- while ((Ty = dyn_cast<ArrayType>(EltTy))) {
- uint64_t Upper = 0;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- if (CAT->getSize().getZExtValue())
- Upper = CAT->getSize().getZExtValue() - 1;
- // FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ if (Ty->isIncompleteArrayType())
EltTy = Ty->getElementType();
+ else {
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ if (CAT->getSize().getZExtValue())
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
}
llvm::DIArray SubscriptArray =
- DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+ DBuilder.GetOrCreateArray(Subscripts.data(), Subscripts.size());
llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
- Unit, "", Unit,
- 0, Size, Align, 0, 0,
- getOrCreateType(EltTy, Unit),
- SubscriptArray);
+ DBuilder.CreateArrayType(Size, Align, getOrCreateType(EltTy, Unit),
+ SubscriptArray);
return DbgTy;
}
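
The new isIncompleteArrayType() path leaves the subscript list empty for arrays of unknown bound, while complete arrays still get one subrange per dimension. Illustrative declarations:

    extern int tbl[]; // incomplete: no subrange emitted
    int m[3][4];      // two subranges, upper bounds 2 and 3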
@@ -1234,6 +1258,12 @@ llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
Ty, Ty->getPointeeType(), Unit);
}
+llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
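
With this overload, C++0x rvalue references are emitted as pointer-like types instead of hitting the "not yet supported" diagnostic that a later hunk removes. For example (illustrative):

    void take(int &&r); // r is now described via DW_TAG_rvalue_reference_type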
+
llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile U) {
QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
@@ -1249,47 +1279,46 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
uint64_t FieldOffset = 0;
- llvm::DIDescriptor ElementTypes[2];
+ llvm::Value *ElementTypes[2];
// FIXME: This should probably be a function type instead.
ElementTypes[0] =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
- "ptr", U, 0,
- Info.first, Info.second, FieldOffset, 0,
- PointerDiffDITy);
+ DBuilder.CreateMemberType("ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
FieldOffset += Info.first;
ElementTypes[1] =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
- "ptr", U, 0,
- Info.first, Info.second, FieldOffset, 0,
- PointerDiffDITy);
+ DBuilder.CreateMemberType("ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(&ElementTypes[0],
- llvm::array_lengthof(ElementTypes));
+ DBuilder.GetOrCreateArray(&ElementTypes[0],
+ llvm::array_lengthof(ElementTypes));
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- U, llvm::StringRef("test"),
- U, 0, FieldOffset,
- 0, 0, 0, llvm::DIType(), Elements);
+ return DBuilder.CreateStructType(U, llvm::StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, Elements);
}
/// CreateEnumType - get enumeration type.
-llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
- llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
+ llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
+ llvm::SmallVector<llvm::Value *, 16> Enumerators;
// Create DIEnumerator elements for each enumerator.
for (EnumDecl::enumerator_iterator
Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
Enum != EnumEnd; ++Enum) {
- Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
- Enum->getInitVal().getZExtValue()));
+ Enumerators.push_back(
+ DBuilder.CreateEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
}
// Return a CompositeType for the enum itself.
llvm::DIArray EltArray =
- DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+ DBuilder.GetOrCreateArray(Enumerators.data(), Enumerators.size());
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
@@ -1299,11 +1328,11 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
}
+ llvm::DIDescriptor EnumContext =
+ getContextDescriptor(dyn_cast<Decl>(ED->getDeclContext()));
llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
- Unit, ED->getName(), DefUnit, Line,
- Size, Align, 0, 0,
- llvm::DIType(), EltArray);
+ DBuilder.CreateEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray);
return DbgTy;
}
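
A small illustrative enum and the values the enumerator loop above records:

    enum Color { Red, Green = 5 }; // DIEnumerators: Red = 0, Green = 5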
@@ -1316,20 +1345,23 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::TemplateSpecialization:
T = cast<TemplateSpecializationType>(T)->desugar();
break;
- case Type::TypeOfExpr: {
- TypeOfExprType *Ty = cast<TypeOfExprType>(T);
- T = Ty->getUnderlyingExpr()->getType();
+ case Type::TypeOfExpr:
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
break;
- }
case Type::TypeOf:
T = cast<TypeOfType>(T)->getUnderlyingType();
break;
case Type::Decltype:
T = cast<DecltypeType>(T)->getUnderlyingType();
break;
+ case Type::Attributed:
+ T = cast<AttributedType>(T)->getEquivalentType();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
+ case Type::Paren:
+ T = cast<ParenType>(T)->getInnerType();
+ break;
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
@@ -1400,15 +1432,15 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return CreateType(cast<ObjCObjectType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
- case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
- case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Builtin: return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex: return CreateType(cast<ComplexType>(Ty));
case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
case Type::Enum:
- return CreateType(cast<TagType>(Ty), Unit);
+ return CreateType(cast<TagType>(Ty));
case Type::FunctionProto:
case Type::FunctionNoProto:
return CreateType(cast<FunctionType>(Ty), Unit);
@@ -1419,29 +1451,29 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::LValueReference:
return CreateType(cast<LValueReferenceType>(Ty), Unit);
+ case Type::RValueReference:
+ return CreateType(cast<RValueReferenceType>(Ty), Unit);
case Type::MemberPointer:
return CreateType(cast<MemberPointerType>(Ty), Unit);
+ case Type::Attributed:
case Type::TemplateSpecialization:
case Type::Elaborated:
+ case Type::Paren:
case Type::SubstTemplateTypeParm:
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::Auto:
llvm_unreachable("type should have been unwrapped!");
- return llvm::DIType();
-
- case Type::RValueReference:
- // FIXME: Implement!
- Diag = "rvalue references";
- break;
+ return llvm::DIType();
}
assert(Diag && "Fall through without a diagnostic?");
unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
"debug information for %0 is not yet supported");
- CGM.getDiags().Report(FullSourceLoc(), DiagID)
+ CGM.getDiags().Report(DiagID)
<< Diag;
return llvm::DIType();
}
@@ -1453,10 +1485,9 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
- Unit, Name, Unit, 0,
- FieldSize, FieldAlign,
- *Offset, 0, FieldTy);
+ llvm::DIType Ty = DBuilder.CreateMemberType(Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
*Offset += FieldSize;
return Ty;
}
@@ -1473,12 +1504,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(RegionStack.size());
const Decl *D = GD.getDecl();
+ unsigned Flags = 0;
+ llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ llvm::DIDescriptor FDContext(Unit);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
FI = SPCache.find(FD);
if (FI != SPCache.end()) {
- llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
RegionStack.push_back(SPN);
@@ -1489,13 +1523,20 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
LinkageName = CGM.getMangledName(GD);
+ if (LinkageName == Name)
+ LinkageName = llvm::StringRef();
+ if (FD->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
- LinkageName = Name;
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
} else {
- // Use llvm function name as linkage name.
+ // Use llvm function name.
Name = Fn->getName();
- LinkageName = Name;
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
}
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
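
The LinkageName reset above drops the linkage name whenever it matches the display name, so C-style functions carry one name instead of two. Illustrative cases:

    extern "C" int f(void); // mangled name is "f": linkage name dropped
    int g(int);             // C++: linkage name "_Z1gi" is kept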
@@ -1503,16 +1544,14 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// It is expected that CurLoc is set before using EmitFunctionStart.
// Usually, CurLoc points to the left bracket location of compound
// statement representing function body.
- llvm::DIFile Unit = getOrCreateFile(CurLoc);
unsigned LineNo = getLineNumber(CurLoc);
-
+ if (D->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::DISubprogram SP =
- DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
- getOrCreateType(FnType, Unit),
- Fn->hasInternalLinkage(), true/*definition*/,
- 0, 0, llvm::DIType(),
- D->isImplicit(),
- CGM.getLangOptions().Optimize, Fn);
+ DBuilder.CreateFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, getOrCreateType(FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/,
+ Flags, CGM.getLangOptions().Optimize, Fn);
// Push function on region stack.
llvm::MDNode *SPN = SP;
@@ -1556,7 +1595,8 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
- if (!strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
+ !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
return;
// If #line directive stack is empty then we are entering a new scope.
@@ -1570,9 +1610,10 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
&& "error handling #line regions!");
bool SeenThisFile = false;
+ // Check whether the current file has already been seen.
for(std::vector<const char *>::iterator I = LineDirectiveFiles.begin(),
E = LineDirectiveFiles.end(); I != E; ++I)
- if (!strcmp(PPLoc.getFilename(), *I)) {
+ if (!strcmp(PCLoc.getFilename(), *I)) {
SeenThisFile = true;
break;
}
@@ -1599,12 +1640,12 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
/// region - "llvm.dbg.region.start.".
void CGDebugInfo::EmitRegionStart(CGBuilderTy &Builder) {
llvm::DIDescriptor D =
- DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
- llvm::DIDescriptor() :
- llvm::DIDescriptor(RegionStack.back()),
- getOrCreateFile(CurLoc),
- getLineNumber(CurLoc),
- getColumnNumber(CurLoc));
+ DBuilder.CreateLexicalBlock(RegionStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(RegionStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
RegionStack.push_back(DN);
}
@@ -1637,8 +1678,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
uint64_t *XOffset) {
- llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
-
+ llvm::SmallVector<llvm::Value *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
@@ -1654,7 +1694,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
- bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+ bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type);
if (HasCopyAndDispose) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
@@ -1685,24 +1725,21 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FieldAlign = Align.getQuantity()*8;
*XOffset = FieldOffset;
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- VD->getName(), Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.CreateMemberType(VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
-
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "", Unit,
- 0, FieldOffset, 0, 0, Flags,
- llvm::DIType(), Elements);
+ unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
+ return DBuilder.CreateStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
+ Elements);
}
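
For a plain '__block int x' that needs no copy/dispose helpers, the synthesized debug struct looks like this (a sketch under 64-bit assumptions; padding elided):

    struct {
      void *__isa;
      void *__forwarding;
      int   __flags;
      int   __size;
      int   x; // *XOffset reports this field's bit offset
    };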
+
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage, CGBuilderTy &Builder) {
@@ -1721,37 +1758,119 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (!Ty)
return;
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) {
+ // If Storage is an aggregate returned as 'sret', let the debugger know
+ // about this.
+ if (Arg->hasStructRetAttr())
+ Ty = DBuilder.CreateReferenceType(Ty);
+ else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
+ // If an aggregate variable has a non-trivial destructor or a non-trivial
+ // copy constructor, it is passed indirectly. Let debug info know about
+ // this by describing the argument as a reference to the aggregate type.
+ if (!Record->hasTrivialCopyConstructor() || !Record->hasTrivialDestructor())
+ Ty = DBuilder.CreateReferenceType(Ty);
+ }
+ }
+
// Get location information.
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
-
- // Create the descriptor for the variable.
- llvm::DIVariable D =
- DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
- VD->getName(),
- Unit, Line, Ty, CGM.getLangOptions().Optimize);
- // Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
-
+ unsigned Flags = 0;
+ if (VD->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::MDNode *Scope = RegionStack.back();
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+
+ llvm::StringRef Name = VD->getName();
+ if (!Name.empty()) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ CharUnits offset = CharUnits::fromQuantity(32);
+ llvm::SmallVector<llvm::Value *, 9> addr;
+ const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset =
+ CharUnits::fromQuantity(CGM.getContext().Target.getPointerWidth(0)/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CharUnits::fromQuantity(XOffset/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.CreateComplexVariable(Tag,
+ llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr.data(), addr.size());
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.CreateLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ CGM.getLangOptions().Optimize, Flags);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+ // If VD is an anonymous union then Storage represents value for
+ // all union fields.
+ if (const RecordType *RT = dyn_cast<RecordType>(VD->getType()))
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(RT->getDecl()))
+ if (RD->isUnion()) {
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Use VarDecl's Tag, Scope and Line number.
+ llvm::DIVariable D =
+ DBuilder.CreateLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ FieldName, Unit, Line, FieldTy,
+ CGM.getLangOptions().Optimize, Flags);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ }
+ }
}
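
The complex address built above walks from the byref struct through __forwarding to the variable itself. In DIBuilder ops it reads roughly (illustrative 64-bit offsets):

    OpPlus 8   // step to __forwarding, one pointer past __isa
    OpDeref    // load the forwarding pointer
    OpPlus N   // step to x's byte offset, XOffset/8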
/// EmitDeclare - Emit local variable declaration debug info.
-void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage, CGBuilderTy &Builder,
- CodeGenFunction *CGF) {
- const ValueDecl *VD = BDRE->getDecl();
+ const CGBlockInfo &blockInfo) {
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
return;
+ bool isByRef = VD->hasAttr<BlocksAttr>();
+
uint64_t XOffset = 0;
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
- if (VD->hasAttr<BlocksAttr>())
+ if (isByRef)
Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
else
Ty = getOrCreateType(VD->getType(), Unit);
@@ -1760,20 +1879,25 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
- CharUnits offset = CGF->BlockDecls[VD];
+ const llvm::TargetData &target = CGM.getTargetData();
+
+ CharUnits offset = CharUnits::fromQuantity(
+ target.getStructLayout(blockInfo.StructureType)
+ ->getElementOffset(blockInfo.getCapture(VD).getIndex()));
+
llvm::SmallVector<llvm::Value *, 9> addr;
const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- if (BDRE->isByRef()) {
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ if (isByRef) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
- offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
+ offset = CharUnits::fromQuantity(target.getPointerSize());
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of x field
offset = CharUnits::fromQuantity(XOffset/8);
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
@@ -1781,13 +1905,12 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
// Create the descriptor for the variable.
llvm::DIVariable D =
- DebugFactory.CreateComplexVariable(Tag,
- llvm::DIDescriptor(RegionStack.back()),
- VD->getName(), Unit, Line, Ty,
- addr);
+ DBuilder.CreateComplexVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr.data(), addr.size());
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
- DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
llvm::MDNode *Scope = RegionStack.back();
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
@@ -1800,9 +1923,10 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
}
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
- const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
- CodeGenFunction *CGF) {
- EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+ const VarDecl *variable, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ EmitDeclare(variable, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder,
+ blockInfo);
}
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
@@ -1836,14 +1960,16 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
}
llvm::StringRef DeclName = D->getName();
llvm::StringRef LinkageName;
- if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()))
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext())
+ && !isa<ObjCMethodDecl>(D->getDeclContext()))
LinkageName = Var->getName();
+ if (LinkageName == DeclName)
+ LinkageName = llvm::StringRef();
llvm::DIDescriptor DContext =
- getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
- DebugFactory.CreateGlobalVariable(DContext, DeclName, DeclName, LinkageName,
- Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasInternalLinkage(),
- true/*definition*/, Var);
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ DBuilder.CreateStaticVariable(DContext, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
}
/// EmitGlobalVariable - Emit information about an objective-c interface.
@@ -1868,49 +1994,45 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ArrayType::Normal, 0);
}
- DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit, LineNo,
- getOrCreateType(T, Unit),
- Var->hasInternalLinkage(),
- true/*definition*/, Var);
+ DBuilder.CreateGlobalVariable(Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
}
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
- llvm::ConstantInt *Init,
- CGBuilderTy &Builder) {
+ llvm::Constant *Init) {
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::StringRef Name = VD->getName();
llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
- Ty = CreateEnumType(ED, Unit);
+ Ty = CreateEnumType(ED);
}
// Do not use DIGlobalVariable for enums.
if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
return;
- DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit,
- getLineNumber(VD->getLocation()),
- Ty, true, true, Init);
+ DBuilder.CreateStaticVariable(Unit, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, Init);
}
/// getOrCreateNamesSpace - Return namespace descriptor for the given
/// namespace decl.
llvm::DINameSpace
-CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
- llvm::DIDescriptor Unit) {
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
NameSpaceCache.find(NSDecl);
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
unsigned LineNo = getLineNumber(NSDecl->getLocation());
-
+ llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation());
llvm::DIDescriptor Context =
- getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
llvm::DINameSpace NS =
- DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
- llvm::DIFile(Unit), LineNo);
+ DBuilder.CreateNameSpace(Context, NSDecl->getName(), FileD, LineNo);
NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
return NS;
}
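
Because the context is obtained recursively through getContextDescriptor, nested namespaces chain naturally. For example (illustrative):

    namespace A { namespace B { int v; } } // B's DINameSpace has A's
                                           // DINameSpace as its context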
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index a1ad012353e5..6a9ab9c58b52 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
@@ -36,13 +37,14 @@ namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
class GlobalDecl;
+ class CGBlockInfo;
/// CGDebugInfo - This class gathers all debug information during compilation
/// and is responsible for emitting to llvm globals or pass directly to
/// the backend.
class CGDebugInfo {
CodeGenModule &CGM;
- llvm::DIFactory DebugFactory;
+ llvm::DIBuilder DBuilder;
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
@@ -74,8 +76,8 @@ class CGDebugInfo {
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
/// Helper functions for getOrCreateType.
- llvm::DIType CreateType(const BuiltinType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const ComplexType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BuiltinType *Ty);
+ llvm::DIType CreateType(const ComplexType *Ty);
llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
@@ -83,22 +85,21 @@ class CGDebugInfo {
llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const TagType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const RecordType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TagType *Ty);
+ llvm::DIType CreateType(const RecordType *Ty);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const EnumType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
- llvm::DIType CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit);
+ llvm::DIType CreateEnumType(const EnumDecl *ED);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
- llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N,
- llvm::DIDescriptor Unit);
-
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
+ llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
llvm::DIType CreatePointerLikeType(unsigned Tag,
const Type *Ty, QualType PointeeTy,
llvm::DIFile F);
@@ -109,26 +110,26 @@ class CGDebugInfo {
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+ llvm::SmallVectorImpl<llvm::Value *> &E,
llvm::DIType T);
void CollectCXXFriends(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
void CollectCXXBases(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
+ llvm::SmallVectorImpl<llvm::Value *> &E);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys);
public:
CGDebugInfo(CodeGenModule &CGM);
@@ -169,10 +170,10 @@ public:
/// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
/// imported variable declaration in a block.
- void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
- llvm::Value *AI,
+ void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
+ llvm::Value *storage,
CGBuilderTy &Builder,
- CodeGenFunction *CGF);
+ const CGBlockInfo &blockInfo);
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
@@ -186,17 +187,19 @@ public:
void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
/// EmitGlobalVariable - Emit global variable's debug info.
- void EmitGlobalVariable(const ValueDecl *VD, llvm::ConstantInt *Init,
- CGBuilderTy &Builder);
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+ /// getOrCreateRecordType - Emit record type's standalone debug info.
+ llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
CGBuilderTy &Builder);
- /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
- void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
- CGBuilderTy &Builder, CodeGenFunction *CGF);
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable
+ /// declaration from an enclosing block.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder, const CGBlockInfo &blockInfo);
// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
@@ -204,8 +207,7 @@ private:
uint64_t *OffSet);
/// getContextDescriptor - Get context info for the decl.
- llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
- llvm::DIDescriptor &CU);
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
/// getCurrentDirname - Return current directory name.
llvm::StringRef getCurrentDirname();
@@ -217,6 +219,9 @@ private:
/// location.
llvm::DIFile getOrCreateFile(SourceLocation Loc);
+ /// getOrCreateMainFile - Get the file info for the main compile unit.
+ llvm::DIFile getOrCreateMainFile();
+
/// getOrCreateType - Get the type from the cache or create a new type if
/// necessary.
llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
@@ -232,6 +237,7 @@ private:
/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
llvm::StringRef getFunctionName(const FunctionDecl *FD);
+
/// getObjCMethodName - Returns the unmangled name of an Objective-C method.
/// This is the display name for the debugging info.
llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 57e5236c67e5..a87dfaed9d5e 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -44,6 +45,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::CXXDestructor:
case Decl::CXXConversion:
case Decl::Field:
+ case Decl::IndirectField:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::ParmVar:
@@ -68,7 +70,6 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
-
assert(0 && "Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
@@ -80,14 +81,15 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::UsingDirective: // using namespace X; [C++]
case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ case Decl::Label: // __label__ x;
// None of these decls require codegen support.
return;
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
- assert(VD.isBlockVarDecl() &&
+ assert(VD.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
- return EmitBlockVarDecl(VD);
+ return EmitVarDecl(VD);
}
case Decl::Typedef: { // typedef int X;
@@ -100,17 +102,14 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
}
}
-/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
-void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
- if (D.hasAttr<AsmLabelAttr>())
- CGM.ErrorUnsupported(&D, "__asm__");
-
+void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
switch (D.getStorageClass()) {
case SC_None:
case SC_Auto:
case SC_Register:
- return EmitLocalBlockVarDecl(D);
+ return EmitAutoVarDecl(D);
case SC_Static: {
llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
@@ -124,7 +123,7 @@ void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
- return EmitStaticBlockVarDecl(D, Linkage);
+ return EmitStaticVarDecl(D, Linkage);
}
case SC_Extern:
case SC_PrivateExtern:
@@ -144,22 +143,32 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
}
std::string ContextName;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ if (!CGF.CurFuncDecl) {
+ // Otherwise this must be in a block declared at global scope.
+ const NamedDecl *ND = cast<NamedDecl>(&D);
+ const DeclContext *DC = ND->getDeclContext();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ MangleBuffer Name;
+ CGM.getBlockMangledName(GlobalDecl(), Name, BD);
+ ContextName = Name.getString();
+ }
+ else
+ assert(0 && "Unknown context for block static var decl");
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
llvm::StringRef Name = CGM.getMangledName(FD);
ContextName = Name.str();
} else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
ContextName = CGF.CurFn->getName();
else
- // FIXME: What about in a block??
- assert(0 && "Unknown context for block var decl");
+ assert(0 && "Unknown context for static var decl");
return ContextName + Separator + D.getNameAsString();
}
llvm::GlobalVariable *
-CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
- const char *Separator,
- llvm::GlobalValue::LinkageTypes Linkage) {
+CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
@@ -172,16 +181,18 @@ CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
CGM.EmitNullConstant(D.getType()), Name, 0,
D.isThreadSpecified(), Ty.getAddressSpace());
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GV->setVisibility(CurFn->getVisibility());
return GV;
}
-/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
-CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
- llvm::GlobalVariable *GV) {
+CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
// If constant emission failed, then this should be a C++ static
@@ -189,12 +200,12 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
if (!Init) {
if (!getContext().getLangOptions().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
- else {
+ else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
-
- EmitStaticCXXBlockVarDeclInit(D, GV);
+
+ EmitCXXGuardedInit(D, GV);
}
return GV;
}
@@ -209,8 +220,10 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
- 0, D.isThreadSpecified(),
+ /*InsertBefore*/ OldGV,
+ D.isThreadSpecified(),
D.getType().getAddressSpace());
+ GV->setVisibility(OldGV->getVisibility());
// Steal the name of the old global
GV->takeName(OldGV);
@@ -228,12 +241,12 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
return GV;
}
-void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
+void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
- llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);
+ llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
@@ -244,10 +257,14 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
EmitVLASize(D.getType());
+
+ // Local static block variables must be treated as globals as they may be
+ // referenced in their RHS initializer block-literal expression.
+ CGM.setStaticLocalDeclAddress(&D, GV);
// If this value has an initializer, emit it.
if (D.getInit())
- GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+ GV = AddInitializerToStaticVarDecl(D, GV);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
@@ -266,17 +283,13 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
if (D.hasAttr<UsedAttr>())
CGM.AddUsedGlobal(GV);
- if (getContext().getLangOptions().CPlusPlus)
- CGM.setStaticLocalDeclAddress(&D, GV);
-
// We may have to cast the constant because of the initializer
// mismatch above.
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- const llvm::Type *LPtrTy =
- llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
// Emit global variable debug descriptor for static vars.
@@ -293,6 +306,15 @@ unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
return ByRefValueInfo.find(VD)->second.second;
}
+llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V) {
+ llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
+ V->getNameAsString());
+ return Loc;
+}
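
A sketch of the access sequence this helper emits for a __block variable, shown as schematic IR under the byref layout that BuildByRefType constructs below:

    %fwd.addr = getelementptr %byref* %base, i32 0, i32 1 ; __forwarding
    %fwd      = load %fwd.addr
    %x.addr   = getelementptr %fwd, 0, <field index of x>  ; named "x"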
+
/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
@@ -307,7 +329,7 @@ unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
/// T x;
/// } x
///
-const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
if (Info.first)
return Info.first;
@@ -316,9 +338,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
std::vector<const llvm::Type *> Types;
- const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
- llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
// void *__isa;
Types.push_back(Int8PtrTy);
@@ -332,7 +352,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
// int32_t __size;
Types.push_back(Int32Ty);
- bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+ bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
if (HasCopyAndDispose) {
/// void *__copy_helper;
Types.push_back(Int8PtrTy);
@@ -343,7 +363,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
bool Packed = false;
CharUnits Align = getContext().getDeclAlign(D);
- if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
+ if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
// We have to insert padding.
// The struct above has 2 32-bit integers.
@@ -359,7 +379,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
// FIXME: We need a sema error for alignment larger than the minimum of
// the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
@@ -375,7 +395,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
// T x;
Types.push_back(ConvertTypeForMem(Ty));
- const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+ const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);
cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
@@ -484,18 +504,101 @@ namespace {
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
- V = CGF.Builder.CreateLoad(V);
- CGF.BuildBlockRelease(V);
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
}
-/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+
+/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
+/// non-zero parts of the specified initializer with equal or fewer than
+/// NumStores scalar stores.
+static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
+ unsigned &NumStores) {
+ // Zero and Undef never require any extra stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return true;
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init))
+ return Init->isNullValue() || NumStores--;
+
+ // See if we can emit each element.
+ if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ // Anything else is hard and scary.
+ return false;
+}
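
For instance (illustrative), with the default budget of 6 the initializer below passes: zero elements cost nothing and the two non-zero elements only decrement NumStores to 4.

    int v[6] = { 0, 0, 1, 0, 2, 0 }; // two scalar stores after the memset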
+
+/// emitStoresForInitAfterMemset - For inits that
+/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
+/// stores that would be required.
+static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
+ CGBuilderTy &Builder) {
+ // Zero doesn't require any stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return;
+
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init)) {
+ if (!Init->isNullValue())
+ Builder.CreateStore(Init, Loc);
+ return;
+ }
+
+ assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
+ "Unknown value type!");
+
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (Elt->isNullValue()) continue;
+
+ // Otherwise, get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ Builder);
+ }
+}
+
+
+/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
+/// plus some stores to initialize a local variable instead of using a memcpy
+/// from a constant global. It is beneficial to use memset if the global is all
+/// zeros, or mostly zeros and large.
+static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) return true;
+
+
+ // If a non-zero global is <= 32 bytes, always use a memcpy. If it is
+ // larger, use memset plus stores only if that takes 6 or fewer scalar
+ // stores.
+ // TODO: Should the budget depend on the size? Avoiding a large global
+ // warrants plopping in more stores.
+ unsigned StoreBudget = 6;
+ uint64_t SizeLimit = 32;
+
+ return GlobalSize > SizeLimit &&
+ canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+}
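To make the heuristic concrete, here is a sketch (hypothetical names) of a
local that takes the new path: the aggregate is well past the 32-byte memcpy
limit, the zeros are free, and only one scalar store is charged against the
default budget of 6.

    // ~412 bytes on a typical 32/64-bit target; only one non-zero scalar.
    struct Big { int a, b, c, d[100]; };
    void init_local() {
      Big x = { 0, 0, 7 };   // 'd' is implicitly zero-initialized
      // Expected lowering, roughly:
      //   call @llvm.memset(<&x as i8*>, i8 0, i64 412, align, false)
      //   store i32 7, i32* <&x.c>
    }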
+
+
+/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
-void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
- SpecialInitFn *SpecialInit) {
+void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
bool isByRef = D.hasAttr<BlocksAttr>();
@@ -520,7 +623,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// If this variable is marked 'const', emit the value as a global.
if (CGM.getCodeGenOpts().MergeAllConstants &&
Ty.isConstant(getContext())) {
- EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
return;
}
@@ -542,9 +645,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
- const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
- llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
- NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
+ llvm::Value *Zero = Builder.getFalse();
+ NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
// Record the NRVO flag for this variable.
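For reference, the flag is needed because whether NRVO fired can depend on
control flow; a sketch of the pattern, assuming a type with a nontrivial
destructor:

    struct X { X(); ~X(); };
    X make(bool flip) {
      X x;                  // may be constructed directly in the return slot
      if (flip) return x;   // an NRVO'd return stores true to the nrvo flag,
      return x;             // so the normal cleanup skips x's destructor
    }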
@@ -561,7 +664,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
Align = getContext().getDeclAlign(&D);
if (isByRef)
Align = std::max(Align,
- CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+ getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
Alloc->setAlignment(Align.getQuantity());
DeclPtr = Alloc;
}
@@ -569,9 +672,8 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
- DeclPtr = CreateStaticBlockVarDecl(D, Class,
- llvm::GlobalValue
- ::InternalLinkage);
+ DeclPtr = CreateStaticVarDecl(D, Class,
+ llvm::GlobalValue::InternalLinkage);
}
// FIXME: Can this happen?
@@ -582,8 +684,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (!DidCallStackSave) {
// Save the stack.
- const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
- llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+ llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
@@ -593,19 +694,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
DidCallStackSave = true;
// Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
- const llvm::Type *LElemPtrTy =
- llvm::PointerType::get(LElemTy, Ty.getAddressSpace());
+ const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
// Allocate memory for the array.
llvm::AllocaInst *VLA =
- Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+ Builder.CreateAlloca(llvm::Type::getInt8Ty(getLLVMContext()), VLASize, "vla");
VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());
DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
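A sketch of the construct this handles (a C99 variable-length array, which
Clang also accepts in C++ as an extension):

    void use_vla(int n) {
      int vla[n];    // first VLA in scope: llvm.stacksave into 'saved_stack',
      vla[0] = 1;    // then 'alloca i8, <n * 4>' bitcast to i32*; a cleanup
    }                // runs llvm.stackrestore on scope exit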
@@ -639,59 +740,65 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
}
if (isByRef) {
- const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
-
EnsureInsertPoint();
- llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
- llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
- llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
- llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
llvm::Value *V;
- int flag = 0;
- int flags = 0;
+
+ BlockFieldFlags fieldFlags;
+ bool fieldNeedsCopyDispose = false;
needsDispose = true;
if (Ty->isBlockPointerType()) {
- flag |= BLOCK_FIELD_IS_BLOCK;
- flags |= BLOCK_HAS_COPY_DISPOSE;
- } else if (BlockRequiresCopying(Ty)) {
- flag |= BLOCK_FIELD_IS_OBJECT;
- flags |= BLOCK_HAS_COPY_DISPOSE;
+ fieldFlags |= BLOCK_FIELD_IS_BLOCK;
+ fieldNeedsCopyDispose = true;
+ } else if (getContext().isObjCNSObjectType(Ty) ||
+ Ty->isObjCObjectPointerType()) {
+ fieldFlags |= BLOCK_FIELD_IS_OBJECT;
+ fieldNeedsCopyDispose = true;
+ } else if (getLangOptions().CPlusPlus) {
+ if (getContext().getBlockVarCopyInits(&D))
+ fieldNeedsCopyDispose = true;
+ else if (const CXXRecordDecl *record = D.getType()->getAsCXXRecordDecl())
+ fieldNeedsCopyDispose = !record->hasTrivialDestructor();
}
// FIXME: Someone double check this.
if (Ty.isObjCGCWeak())
- flag |= BLOCK_FIELD_IS_WEAK;
+ fieldFlags |= BLOCK_FIELD_IS_WEAK;
int isa = 0;
- if (flag&BLOCK_FIELD_IS_WEAK)
+ if (fieldFlags & BLOCK_FIELD_IS_WEAK)
isa = 1;
- V = llvm::ConstantInt::get(Int32Ty, isa);
- V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
- Builder.CreateStore(V, isa_field);
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ Builder.CreateStore(V, Builder.CreateStructGEP(DeclPtr, 0, "byref.isa"));
- Builder.CreateStore(DeclPtr, forwarding_field);
+ Builder.CreateStore(DeclPtr, Builder.CreateStructGEP(DeclPtr, 1,
+ "byref.forwarding"));
- V = llvm::ConstantInt::get(Int32Ty, flags);
- Builder.CreateStore(V, flags_field);
+ // Blocks ABI:
+ // c) the flags field is set to either 0 if no helper functions are
+ // needed or BLOCK_HAS_COPY_DISPOSE if they are,
+ BlockFlags flags;
+ if (fieldNeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(DeclPtr, 2, "byref.flags"));
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
- V = llvm::ConstantInt::get(Int32Ty,
- CGM.GetTargetTypeStoreSize(V1).getQuantity());
- Builder.CreateStore(V, size_field);
+ V = llvm::ConstantInt::get(IntTy, CGM.GetTargetTypeStoreSize(V1).getQuantity());
+ Builder.CreateStore(V, Builder.CreateStructGEP(DeclPtr, 3, "byref.size"));
- if (flags & BLOCK_HAS_COPY_DISPOSE) {
- BlockHasCopyDispose = true;
+ if (fieldNeedsCopyDispose) {
llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
- Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
- Align.getQuantity()),
+ Builder.CreateStore(CGM.BuildbyrefCopyHelper(DeclPtr->getType(),
+ fieldFlags,
+ Align.getQuantity(), &D),
copy_helper);
llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
- Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
- Align.getQuantity()),
+ Builder.CreateStore(CGM.BuildbyrefDestroyHelper(DeclPtr->getType(),
+ fieldFlags,
+ Align.getQuantity(), &D),
destroy_helper);
}
}
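For orientation, the byref structure filled in above has roughly this shape
for a __block int under -fblocks; the field comments match the GEP indices
used by the code, and the helper fields exist only when a copy/dispose pair
is needed:

    // struct __block_byref_i {
    //   void *isa;                           // field 0
    //   struct __block_byref_i *forwarding;  // field 1, initially self
    //   int flags;                           // field 2: 0 or BLOCK_HAS_COPY_DISPOSE
    //   int size;                            // field 3
    //   /* copy helper, dispose helper */    // fields 4 and 5, if needed
    //   int i;                               // the variable itself
    // };
    void block_demo() {
      __block int i = 0;
      ^{ ++i; }();   // accesses go through the 'forwarding' pointer
    }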
@@ -700,9 +807,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
SpecialInit(*this, D, DeclPtr);
} else if (Init) {
llvm::Value *Loc = DeclPtr;
- if (isByRef)
- Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
- D.getNameAsString());
bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();
@@ -712,27 +816,25 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty,this);
assert(Init != 0 && "Wasn't a simple constant init?");
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
- const llvm::Type *IntPtr =
- llvm::IntegerType::get(VMContext, LLVMPointerWidth);
llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtr,
+ llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(Ty).getQuantity());
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *BP = Int8PtrTy;
if (Loc->getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP, "tmp");
-
- llvm::Value *NotVolatile =
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
-
- // If the initializer is all zeros, codegen with memset.
- if (isa<llvm::ConstantAggregateZero>(Init)) {
- llvm::Value *Zero =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
- Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
- Loc, Zero, SizeVal, AlignVal, NotVolatile);
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(Init,
+ CGM.getTargetData().getTypeAllocSize(Init->getType()))) {
+ Builder.CreateMemSet(Loc, Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+ if (!Init->isNullValue()) {
+ Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(Init, Loc, Builder);
+ }
+
} else {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
@@ -747,20 +849,36 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (SrcPtr->getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
- Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
- SizeVal->getType()),
- Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, Align.getQuantity(), false);
}
} else if (Ty->isReferenceType()) {
RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
+ if (isByRef) {
+ // When the RHS has side effects, we must go through the 'forwarding' field
+ // to get to the address of the __block variable descriptor.
+ if (Init->HasSideEffects(getContext()))
+ Loc = BuildBlockByrefAddress(DeclPtr, &D);
+ else
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+ }
EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
} else if (Init->getType()->isAnyComplexType()) {
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
EmitComplexExprIntoAddr(Init, Loc, isVolatile);
} else {
- EmitAggExpr(Init, Loc, isVolatile);
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+ EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false));
}
}
@@ -805,9 +923,8 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
}
// If this is a block variable, clean it up.
- // FIXME: this should be an EH cleanup as well. rdar://problem/8224178
if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
- EHStack.pushCleanup<CallBlockRelease>(NormalCleanup, DeclPtr);
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, DeclPtr);
}
/// Emit an alloca (or GlobalValue depending on target)
@@ -817,7 +934,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
QualType Ty = D.getType();
- CanQualType CTy = getContext().getCanonicalType(Ty);
llvm::Value *DeclPtr;
// If this is an aggregate or variable sized value, reuse the input pointer.
@@ -829,8 +945,9 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
// Store the initial value into the alloca.
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
- EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty);
+ EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
+ getContext().getDeclAlign(&D).getQuantity(), Ty,
+ CGM.getTBAAInfo(Ty));
}
Arg->setName(D.getName());
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index e2f197522b26..e295267896eb 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
#include "CGCXXABI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
@@ -34,14 +35,24 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
if (!CGF.hasAggregateLLVMType(T)) {
llvm::Value *V = CGF.EmitScalarExpr(Init);
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
+ CodeGenModule &CGM = CGF.CGM;
+ Qualifiers::GC GCAttr = CGM.getContext().getObjCGCAttrKind(T);
+ if (GCAttr == Qualifiers::Strong)
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, V, DeclPtr,
+ D.isThreadSpecified());
+ else if (GCAttr == Qualifiers::Weak)
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, V, DeclPtr);
+ else
+ CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
- CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
+ CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, isVolatile, true));
}
}
+/// Emit code to cause the destruction of the given variable with
+/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *DeclPtr) {
CodeGenModule &CGM = CGF.CGM;
@@ -106,15 +117,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
return;
}
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
std::vector<const llvm::Type *> Params;
Params.push_back(Int8PtrTy);
// Get the destructor function type
const llvm::Type *DtorFnTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ Params, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
Params.clear();
@@ -138,6 +147,11 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
}
+void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr) {
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr);
+}
+
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
const llvm::FunctionType *FTy,
@@ -145,20 +159,22 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
+ if (!CGM.getContext().getLangOptions().AppleKext) {
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().Target.getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+ }
- // Set the section if needed.
- if (const char *Section =
- CGM.getContext().Target.getStaticInitSectionSpecifier())
- Fn->setSection(Section);
-
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
Fn->setDoesNotThrow();
return Fn;
}
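As a reminder of what these helpers wrap, any global needing dynamic
initialization gets such an internal function; a sketch:

    int seed();        // not a constant expression
    int g = seed();    // emits an internal __cxx_global_var_init that calls
                       // seed() and stores to @g; it is placed in the
                       // target's static-init section when one is specified
                       // (skipped for Apple kexts) and marked nounwind when
                       // exceptions are disabled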
void
-CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr) {
const llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -167,7 +183,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
- CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr);
if (D->hasAttr<InitPriorityAttr>()) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
@@ -240,13 +256,20 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
AddGlobalDtor(Fn);
}
+/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
- const VarDecl *D) {
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
- llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
- EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+ // Use guarded initialization if the global variable is weak due to
+ // being a class template's static data member.
+ if (Addr->hasWeakLinkage() && D->getInstantiatedFromStaticDataMember()) {
+ EmitCXXGuardedInit(*D, Addr);
+ } else {
+ EmitCXXGlobalVarDeclInit(*D, Addr);
+ }
FinishFunction();
}
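The weak-linkage case arises from class templates; a sketch:

    struct Widget { Widget(); };
    template <typename T> struct S { static Widget member; };
    template <typename T> Widget S<T>::member;  // weak: any TU may emit it
    Widget &w = S<int>::member;                 // odr-use instantiates it
    // The initializer for S<int>::member now runs under a guard (e.g.
    // __cxa_guard_acquire/__cxa_guard_release in the Itanium ABI), so it
    // runs exactly once even when several TUs emit the init function.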
@@ -283,143 +306,6 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
FinishFunction();
}
-static llvm::Constant *getGuardAcquireFn(CodeGenFunction &CGF) {
- // int __cxa_guard_acquire(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.ConvertType(CGF.getContext().IntTy),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
-}
-
-static llvm::Constant *getGuardReleaseFn(CodeGenFunction &CGF) {
- // void __cxa_guard_release(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
-}
-
-static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
- // void __cxa_guard_abort(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
-}
-
-namespace {
- struct CallGuardAbort : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
- CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- // It shouldn't be possible for this to throw, but if it can,
- // this should allow for the possibility of an invoke.
- CGF.Builder.CreateCall(getGuardAbortFn(CGF), Guard)
- ->setDoesNotThrow();
- }
- };
-}
-
-void
-CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
- llvm::GlobalVariable *GV) {
- // Bail out early if this initializer isn't reachable.
- if (!Builder.GetInsertBlock()) return;
-
- bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
-
- llvm::SmallString<256> GuardVName;
- CGM.getCXXABI().getMangleContext().mangleGuardVariable(&D, GuardVName);
-
- // Create the guard variable.
- llvm::GlobalVariable *GuardVariable =
- new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
- false, GV->getLinkage(),
- llvm::Constant::getNullValue(Int64Ty),
- GuardVName.str());
-
- // Load the first byte of the guard variable.
- const llvm::Type *PtrTy
- = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
- llvm::Value *V =
- Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
-
- llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
- llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
-
- // Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"),
- InitCheckBlock, EndBlock);
-
- EmitBlock(InitCheckBlock);
-
- // Variables used when coping with thread-safe statics and exceptions.
- if (ThreadsafeStatics) {
- // Call __cxa_guard_acquire.
- V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
-
- llvm::BasicBlock *InitBlock = createBasicBlock("init");
-
- Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
- InitBlock, EndBlock);
-
- // Call __cxa_guard_abort along the exceptional edge.
- if (Exceptions)
- EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
-
- EmitBlock(InitBlock);
- }
-
- if (D.getType()->isReferenceType()) {
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
- QualType T = D.getType();
- RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
- EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, Alignment, T);
- } else
- EmitDeclInit(*this, D, GV);
-
- if (ThreadsafeStatics) {
- // Pop the guard-abort cleanup if we pushed one.
- if (Exceptions)
- PopCleanupBlock();
-
- // Call __cxa_guard_release. This cannot throw.
- Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
- } else {
- llvm::Value *One =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
- Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
- }
-
- // Register the call to the destructor.
- if (!D.getType()->isReferenceType())
- EmitDeclDestroy(*this, D, GV);
-
- EmitBlock(EndBlock);
-}
-
/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
/// invoked, calls the default destructor on array elements in reverse order of
/// construction.
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 7fb616e5a150..6181965748cc 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -14,163 +14,18 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CGException.h"
+#include "CGCleanup.h"
#include "TargetInfo.h"
using namespace clang;
using namespace CodeGen;
-/// Push an entry of the given size onto this protected-scope stack.
-char *EHScopeStack::allocate(size_t Size) {
- if (!StartOfBuffer) {
- unsigned Capacity = 1024;
- while (Capacity < Size) Capacity *= 2;
- StartOfBuffer = new char[Capacity];
- StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
- } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
- unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
- unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
-
- unsigned NewCapacity = CurrentCapacity;
- do {
- NewCapacity *= 2;
- } while (NewCapacity < UsedCapacity + Size);
-
- char *NewStartOfBuffer = new char[NewCapacity];
- char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
- char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
- memcpy(NewStartOfData, StartOfData, UsedCapacity);
- delete [] StartOfBuffer;
- StartOfBuffer = NewStartOfBuffer;
- EndOfBuffer = NewEndOfBuffer;
- StartOfData = NewStartOfData;
- }
-
- assert(StartOfBuffer + Size <= StartOfData);
- StartOfData -= Size;
- return StartOfData;
-}
-
-EHScopeStack::stable_iterator
-EHScopeStack::getEnclosingEHCleanup(iterator it) const {
- assert(it != end());
- do {
- if (isa<EHCleanupScope>(*it)) {
- if (cast<EHCleanupScope>(*it).isEHCleanup())
- return stabilize(it);
- return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
- }
- ++it;
- } while (it != end());
- return stable_end();
-}
-
-
-void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
- assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
- char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
- bool IsNormalCleanup = Kind & NormalCleanup;
- bool IsEHCleanup = Kind & EHCleanup;
- bool IsActive = !(Kind & InactiveCleanup);
- EHCleanupScope *Scope =
- new (Buffer) EHCleanupScope(IsNormalCleanup,
- IsEHCleanup,
- IsActive,
- Size,
- BranchFixups.size(),
- InnermostNormalCleanup,
- InnermostEHCleanup);
- if (IsNormalCleanup)
- InnermostNormalCleanup = stable_begin();
- if (IsEHCleanup)
- InnermostEHCleanup = stable_begin();
-
- return Scope->getCleanupBuffer();
-}
-
-void EHScopeStack::popCleanup() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHCleanupScope>(*begin()));
- EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
- InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
- StartOfData += Cleanup.getAllocatedSize();
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- // Destroy the cleanup.
- Cleanup.~EHCleanupScope();
-
- // Check whether we can shrink the branch-fixups stack.
- if (!BranchFixups.empty()) {
- // If we no longer have any normal cleanups, all the fixups are
- // complete.
- if (!hasNormalCleanups())
- BranchFixups.clear();
-
- // Otherwise we can still trim out unnecessary nulls.
- else
- popNullFixups();
- }
-}
-
-EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
- char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
- CatchDepth++;
- return new (Buffer) EHFilterScope(NumFilters);
-}
-
-void EHScopeStack::popFilter() {
- assert(!empty() && "popping exception stack when not empty");
-
- EHFilterScope &Filter = cast<EHFilterScope>(*begin());
- StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched filter push/pop");
- CatchDepth--;
-}
-
-EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
- char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
- CatchDepth++;
- EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
- for (unsigned I = 0; I != NumHandlers; ++I)
- Scope->getHandlers()[I].Index = getNextEHDestIndex();
- return Scope;
-}
-
-void EHScopeStack::pushTerminate() {
- char *Buffer = allocate(EHTerminateScope::getSize());
- CatchDepth++;
- new (Buffer) EHTerminateScope(getNextEHDestIndex());
-}
-
-/// Remove any 'null' fixups on the stack. However, we can't pop more
-/// fixups than the fixup depth on the innermost normal cleanup, or
-/// else fixups that we try to add to that cleanup will end up in the
-/// wrong place. We *could* try to shrink fixup depths, but that's
-/// actually a lot of work for little benefit.
-void EHScopeStack::popNullFixups() {
- // We expect this to only be called when there's still an innermost
- // normal cleanup; otherwise there really shouldn't be any fixups.
- assert(hasNormalCleanups());
-
- EHScopeStack::iterator it = find(InnermostNormalCleanup);
- unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
- assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
-
- while (BranchFixups.size() > MinSize &&
- BranchFixups.back().Destination == 0)
- BranchFixups.pop_back();
-}
-
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
@@ -287,7 +142,7 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
}
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
- const char *Name) {
+ llvm::StringRef Name) {
const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
@@ -299,6 +154,7 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
}
const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
+const EHPersonality EHPersonality::GNU_C_SJLJ("__gcc_personality_sj0");
const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
@@ -306,6 +162,8 @@ const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
"objc_exception_throw");
static const EHPersonality &getCPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_C_SJLJ;
return EHPersonality::GNU_C;
}
@@ -357,24 +215,102 @@ const EHPersonality &EHPersonality::get(const LangOptions &L) {
return getCPersonality(L);
}
-static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF,
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
- const char *Name = Personality.getPersonalityFnName();
-
llvm::Constant *Fn =
- CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getInt32Ty(
- CGF.CGM.getLLVMContext()),
- true),
- Name);
- return llvm::ConstantExpr::getBitCast(Fn, CGF.CGM.PtrToInt8Ty);
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(CGM.getLLVMContext()),
+ true),
+ Personality.getPersonalityFnName());
+ return Fn;
+}
+
+static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+}
+
+/// Check whether a personality function could reasonably be swapped
+/// for a C++ personality function.
+static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
+ for (llvm::Constant::use_iterator
+ I = Fn->use_begin(), E = Fn->use_end(); I != E; ++I) {
+ llvm::User *User = *I;
+
+ // Conditionally white-list bitcasts.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
+ if (!PersonalityHasOnlyCXXUses(CE))
+ return false;
+ continue;
+ }
+
+ // Otherwise, it has to be a selector call.
+ if (!isa<llvm::EHSelectorInst>(User)) return false;
+
+ llvm::EHSelectorInst *Selector = cast<llvm::EHSelectorInst>(User);
+ for (unsigned I = 2, E = Selector->getNumArgOperands(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::GlobalVariable *GV
+ = dyn_cast<llvm::GlobalVariable>(Selector->getArgOperand(I));
+ if (!GV) continue;
+
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Try to use the C++ personality function in ObjC++. Not doing this
+/// can cause some incompatibilities with gcc, which is more
+/// aggressive about only using the ObjC++ personality in a function
+/// when it really needs it.
+void CodeGenModule::SimplifyPersonality() {
+ // For now, this is really a Darwin-specific operation.
+ if (Context.Target.getTriple().getOS() != llvm::Triple::Darwin)
+ return;
+
+ // If we're not in ObjC++ -fexceptions, there's nothing to do.
+ if (!Features.CPlusPlus || !Features.ObjC1 || !Features.Exceptions)
+ return;
+
+ const EHPersonality &ObjCXX = EHPersonality::get(Features);
+ const EHPersonality &CXX = getCXXPersonality(Features);
+ if (&ObjCXX == &CXX ||
+ ObjCXX.getPersonalityFnName() == CXX.getPersonalityFnName())
+ return;
+
+ llvm::Function *Fn =
+ getModule().getFunction(ObjCXX.getPersonalityFnName());
+
+ // Nothing to do if it's unused.
+ if (!Fn || Fn->use_empty()) return;
+
+ // Can't do the optimization if it has non-C++ uses.
+ if (!PersonalityHasOnlyCXXUses(Fn)) return;
+
+ // Create the C++ personality function and kill off the old
+ // function.
+ llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+
+ // This can happen if the user is screwing with us.
+ if (Fn->getType() != CXXFn->getType()) return;
+
+ Fn->replaceAllUsesWith(CXXFn);
+ Fn->eraseFromParent();
}
/// Returns the value to inject into a selector to indicate the
/// presence of a catch-all.
static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
// Possibly we should use @llvm.eh.catch.all.value here.
- return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+ return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
}
/// Returns the value to inject into a selector to indicate the
@@ -386,26 +322,11 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeExceptionCleanup : EHScopeStack::Cleanup {
- FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
- llvm::Value *ExnLocVar)
- : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
-
- llvm::Value *ShouldFreeVar;
- llvm::Value *ExnLocVar;
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
-
- llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
- "should-free-exnobj");
- CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
- CGF.EmitBlock(FreeBB);
- llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ struct FreeException {
+ static void Emit(CodeGenFunction &CGF, bool forEH,
+ llvm::Value *exn) {
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
->setDoesNotThrow();
- CGF.EmitBlock(DoneBB);
}
};
}
@@ -414,41 +335,17 @@ namespace {
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
-static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *ExnLoc) {
- // We want to release the allocated exception object if this
- // expression throws. We do this by pushing an EH-only cleanup
- // block which, furthermore, deactivates itself after the expression
- // is complete.
- llvm::AllocaInst *ShouldFreeVar =
- CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
- "should-free-exnobj.var");
- CGF.InitTempAlloca(ShouldFreeVar,
- llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
-
- // A variable holding the exception pointer. This is necessary
- // because the throw expression does not necessarily dominate the
- // cleanup, for example if it appears in a conditional expression.
- llvm::AllocaInst *ExnLocVar =
- CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
-
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
+ llvm::Value *addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- // FIXME: stmt expressions might require this to be a normal
- // cleanup, too.
- CGF.EHStack.pushCleanup<FreeExceptionCleanup>(EHCleanup,
- ShouldFreeVar,
- ExnLocVar);
- EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
-
- CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
- CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
- ShouldFreeVar);
+ CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- const llvm::Type *Ty = CGF.ConvertType(E->getType())->getPointerTo();
- llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
+ const llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -457,22 +354,10 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
- CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false);
-
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
- ShouldFreeVar);
+ CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
- // Technically, the exception object is like a temporary; it has to
- // be cleaned up when its full-expression is complete.
- // Unfortunately, the AST represents full-expressions by creating a
- // CXXExprWithTemporaries, which it only does when there are actually
- // temporaries.
- //
- // If any cleanups have been added since we pushed ours, they must
- // be from temporaries; this will get popped at the same time.
- // Otherwise we need to pop ours off. FIXME: this is very brittle.
- if (Cleanup == CGF.EHStack.stable_begin())
- CGF.PopCleanupBlock();
+ // Deactivate the cleanup block.
+ CGF.DeactivateCleanupBlock(cleanup);
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
@@ -495,8 +380,10 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
Builder.CreateUnreachable();
}
- // Clear the insertion point to indicate we are in unreachable code.
- Builder.ClearInsertionPoint();
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+
return;
}
@@ -517,7 +404,8 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
// Now throw the exception.
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, true);
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
+ /*ForEH=*/true);
// The address of the destructor. If the exception type has a
// trivial destructor (or isn't a record), we just pass null.
@@ -545,16 +433,13 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
Builder.CreateUnreachable();
}
- // Clear the insertion point to indicate we are in unreachable code.
- Builder.ClearInsertionPoint();
-
- // FIXME: For now, emit a dummy basic block because expr emitters in generally
- // are not ready to handle emitting expressions at unreachable points.
- EnsureInsertPoint();
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
}
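The insertion-point discipline matters because throw can occur
mid-expression; a sketch:

    #include <stdexcept>
    int pick(bool ok, int v) {
      // The false arm emits the throw, then continues in a dead
      // "throw.cont" block so the conditional-operator emitter still
      // has a valid insertion point to finish the expression.
      return ok ? v : throw std::runtime_error("bad pick");
    }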
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -575,13 +460,14 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
for (unsigned I = 0; I != NumExceptions; ++I) {
QualType Ty = Proto->getExceptionType(I);
QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
Filter->setFilter(I, EHType);
}
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -625,7 +511,7 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
if (CaughtType->isObjCObjectPointerType())
TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
else
- TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, /*ForEH=*/true);
CatchScope->setHandler(I, TypeInfo, Handler);
} else {
// No exception decl indicates '...', a catch-all.
@@ -655,7 +541,7 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return 0;
// Check the innermost scope for a cached landing pad. If this is
@@ -739,8 +625,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- const EHPersonality &Personality =
- EHPersonality::get(CGF.CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(getLangOptions());
// Create and configure the landing pad.
llvm::BasicBlock *LP = createBasicBlock("lpad");
@@ -757,7 +642,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Build the selector arguments.
llvm::SmallVector<llvm::Value*, 8> EHSelector;
EHSelector.push_back(Exn);
- EHSelector.push_back(getPersonalityFn(*this, Personality));
+ EHSelector.push_back(getOpaquePersonalityFn(CGM, Personality));
// Accumulate all the handlers in scope.
llvm::DenseMap<llvm::Value*, UnwindDest> EHHandlers;
@@ -837,7 +722,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// If we have a catch-all, add null to the selector.
if (CatchAll.isValid()) {
- EHSelector.push_back(getCatchAllValue(CGF));
+ EHSelector.push_back(getCatchAllValue(*this));
// If we have an EH filter, we need to add those handlers in the
// right place in the selector, which is to say, at the end.
@@ -853,14 +738,14 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Also check whether we need a cleanup.
if (UseInvokeInlineHack || HasEHCleanup)
EHSelector.push_back(UseInvokeInlineHack
- ? getCatchAllValue(CGF)
- : getCleanupValue(CGF));
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
// Otherwise, signal that we at least have cleanups.
} else if (UseInvokeInlineHack || HasEHCleanup) {
EHSelector.push_back(UseInvokeInlineHack
- ? getCatchAllValue(CGF)
- : getCleanupValue(CGF));
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
} else {
assert(LastToEmitInLoop > 2);
LastToEmitInLoop--;
@@ -902,7 +787,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Check whether the exception matches.
llvm::CallInst *Id
= Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Builder.CreateBitCast(Type, Int8PtrTy));
Id->setDoesNotThrow();
Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
Match, Next);
@@ -1141,55 +1026,54 @@ static void InitCatchParam(CodeGenFunction &CGF,
return;
}
- // FIXME: this *really* needs to be done via a proper, Sema-emitted
- // initializer expression.
-
- CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
- assert(RD && "aggregate catch type was not a record!");
+ assert(isa<RecordType>(CatchType) && "unexpected catch type!");
const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
- if (RD->hasTrivialCopyConstructor()) {
- llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
- CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ // Check for a copy expression. If we don't have a copy expression,
+ // that means a trivial copy is okay.
+ const Expr *copyExpr = CatchParam.getInit();
+ if (!copyExpr) {
+ llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
return;
}
// We have to call __cxa_get_exception_ptr to get the adjusted
// pointer before copying.
- llvm::CallInst *AdjustedExn =
+ llvm::CallInst *rawAdjustedExn =
CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
- AdjustedExn->setDoesNotThrow();
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ rawAdjustedExn->setDoesNotThrow();
- CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
- assert(CD && "record has no copy constructor!");
- llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+ // Cast that to the appropriate type.
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
- CD->getThisType(CGF.getContext())));
- CallArgs.push_back(std::make_pair(RValue::get(Cast),
- CD->getParamDecl(0)->getType()));
-
- const FunctionProtoType *FPT
- = CD->getType()->getAs<FunctionProtoType>();
+ // The copy expression is defined in terms of an OpaqueValueExpr.
+ // Find it and map it to the adjusted expression.
+ CodeGenFunction::OpaqueValueMapping
+ opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
+ CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
// Call the copy ctor in a terminate scope.
CGF.EHStack.pushTerminate();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- CopyCtor, ReturnValueSlot(), CallArgs, CD);
+
+ // Perform the copy construction.
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, false, false));
+
+ // Leave the terminate scope.
CGF.EHStack.popTerminate();
+ // Undo the opaque value mapping.
+ opaque.pop();
+
// Finally we can call __cxa_begin_catch.
CallBeginCatch(CGF, Exn, true);
}
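A sketch of the construct being initialized here, a by-value catch of a
class with a nontrivial copy constructor:

    struct Widget {
      Widget() {}
      Widget(const Widget &) { /* nontrivial; may throw */ }
    };
    void demo() {
      try {
        throw Widget();
      } catch (Widget w) {   // the copy runs between __cxa_get_exception_ptr
        (void) w;            // and __cxa_begin_catch, inside a terminate
      }                      // scope, so a throwing copy calls std::terminate
    }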
/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
-static void BeginCatch(CodeGenFunction &CGF,
- const CXXCatchStmt *S) {
+static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
// We have to be very careful with the ordering of cleanups here:
// C++ [except.throw]p4:
// The destruction [of the exception temporary] occurs
@@ -1221,7 +1105,7 @@ static void BeginCatch(CodeGenFunction &CGF,
}
// Emit the local.
- CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam, &InitCatchParam);
}
namespace {
@@ -1428,7 +1312,7 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
// Whether the finally block is being executed for EH purposes.
- llvm::AllocaInst *ForEHVar = CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
"finally.for-eh");
InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
@@ -1502,7 +1386,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
// Tell the backend what the exception table should be:
// nothing but a catch-all.
- llvm::Value *Args[3] = { Exn, getPersonalityFn(*this, Personality),
+ llvm::Value *Args[3] = { Exn, getOpaquePersonalityFn(CGM, Personality),
getCatchAllValue(*this) };
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
Args, Args+3, "eh.selector")
@@ -1511,7 +1395,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
TerminateCall->setDoesNotThrow();
- CGF.Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
// Restore the saved insertion state.
Builder.restoreIP(SavedIP);
@@ -1553,8 +1437,9 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
+ llvm::StringRef RethrowName = Personality.getCatchallRethrowFnName();
llvm::Constant *RethrowFn;
- if (const char *RethrowName = Personality.getCatchallRethrowFnName())
+ if (!RethrowName.empty())
RethrowFn = getCatchallRethrowFn(*this, RethrowName);
else
RethrowFn = getUnwindResumeOrRethrowFn();
@@ -1569,6 +1454,3 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
return RethrowBlock;
}
-EHScopeStack::Cleanup::~Cleanup() {
- llvm_unreachable("Cleanup is indestructable");
-}
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
index f1294746a42d..1f9b8964dcae 100644
--- a/lib/CodeGen/CGException.h
+++ b/lib/CodeGen/CGException.h
@@ -15,578 +15,40 @@
#ifndef CLANG_CODEGEN_CGEXCEPTION_H
#define CLANG_CODEGEN_CGEXCEPTION_H
-/// EHScopeStack is defined in CodeGenFunction.h, but its
-/// implementation is in this file and in CGException.cpp.
-#include "CodeGenFunction.h"
-
-namespace llvm {
- class Value;
- class BasicBlock;
-}
+#include "llvm/ADT/StringRef.h"
namespace clang {
+class LangOptions;
+
namespace CodeGen {
/// The exceptions personality for a function.
class EHPersonality {
- const char *PersonalityFn;
+ llvm::StringRef PersonalityFn;
// If this is non-empty, this personality requires a non-standard
// function for rethrowing an exception after a catchall cleanup.
// This function must have prototype void(void*).
- const char *CatchallRethrowFn;
+ llvm::StringRef CatchallRethrowFn;
- EHPersonality(const char *PersonalityFn,
- const char *CatchallRethrowFn = 0)
+ EHPersonality(llvm::StringRef PersonalityFn,
+ llvm::StringRef CatchallRethrowFn = llvm::StringRef())
: PersonalityFn(PersonalityFn),
CatchallRethrowFn(CatchallRethrowFn) {}
public:
static const EHPersonality &get(const LangOptions &Lang);
static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
static const EHPersonality GNU_ObjC;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
- const char *getPersonalityFnName() const { return PersonalityFn; }
- const char *getCatchallRethrowFnName() const { return CatchallRethrowFn; }
-};
-
-/// A protected scope for zero-cost EH handling.
-class EHScope {
- llvm::BasicBlock *CachedLandingPad;
-
- unsigned K : 2;
-
-protected:
- enum { BitsRemaining = 30 };
-
-public:
- enum Kind { Cleanup, Catch, Terminate, Filter };
-
- EHScope(Kind K) : CachedLandingPad(0), K(K) {}
-
- Kind getKind() const { return static_cast<Kind>(K); }
-
- llvm::BasicBlock *getCachedLandingPad() const {
- return CachedLandingPad;
- }
-
- void setCachedLandingPad(llvm::BasicBlock *Block) {
- CachedLandingPad = Block;
- }
-};
-
-/// A scope which attempts to handle some, possibly all, types of
-/// exceptions.
-///
-/// Objective C @finally blocks are represented using a cleanup scope
-/// after the catch scope.
-class EHCatchScope : public EHScope {
- unsigned NumHandlers : BitsRemaining;
-
- // In effect, we have a flexible array member
- // Handler Handlers[0];
- // But that's only standard in C99, not C++, so we have to do
- // annoying pointer arithmetic instead.
-
-public:
- struct Handler {
- /// A type info value, or null (C++ null, not an LLVM null pointer)
- /// for a catch-all.
- llvm::Value *Type;
-
- /// The catch handler for this type.
- llvm::BasicBlock *Block;
-
- /// The unwind destination index for this handler.
- unsigned Index;
- };
-
-private:
- friend class EHScopeStack;
-
- Handler *getHandlers() {
- return reinterpret_cast<Handler*>(this+1);
- }
-
- const Handler *getHandlers() const {
- return reinterpret_cast<const Handler*>(this+1);
- }
-
-public:
- static size_t getSizeForNumHandlers(unsigned N) {
- return sizeof(EHCatchScope) + N * sizeof(Handler);
- }
-
- EHCatchScope(unsigned NumHandlers)
- : EHScope(Catch), NumHandlers(NumHandlers) {
- }
-
- unsigned getNumHandlers() const {
- return NumHandlers;
- }
-
- void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
- setHandler(I, /*catchall*/ 0, Block);
- }
-
- void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
- assert(I < getNumHandlers());
- getHandlers()[I].Type = Type;
- getHandlers()[I].Block = Block;
- }
-
- const Handler &getHandler(unsigned I) const {
- assert(I < getNumHandlers());
- return getHandlers()[I];
- }
-
- typedef const Handler *iterator;
- iterator begin() const { return getHandlers(); }
- iterator end() const { return getHandlers() + getNumHandlers(); }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Catch;
- }
-};
-
-/// A cleanup scope which generates the cleanup blocks lazily.
-class EHCleanupScope : public EHScope {
- /// Whether this cleanup needs to be run along normal edges.
- bool IsNormalCleanup : 1;
-
- /// Whether this cleanup needs to be run along exception edges.
- bool IsEHCleanup : 1;
-
- /// Whether this cleanup was activated before all normal uses.
- bool ActivatedBeforeNormalUse : 1;
-
- /// Whether this cleanup was activated before all EH uses.
- bool ActivatedBeforeEHUse : 1;
-
- /// The amount of extra storage needed by the Cleanup.
- /// Always a multiple of the scope-stack alignment.
- unsigned CleanupSize : 12;
-
- /// The number of fixups required by enclosing scopes (not including
- /// this one). If this is the top cleanup scope, all the fixups
- /// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining - 16;
-
- /// The nearest normal cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingNormal;
-
- /// The nearest EH cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingEH;
-
- /// The dual entry/exit block along the normal edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *NormalBlock;
-
- /// The dual entry/exit block along the EH edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *EHBlock;
-
- /// An optional i1 variable indicating whether this cleanup has been
- /// activated yet. This has one of three states:
- /// - it is null if the cleanup is inactive
- /// - it is activeSentinel() if the cleanup is active and was not
- /// required before activation
- /// - it points to a valid variable
- llvm::AllocaInst *ActiveVar;
-
- /// Extra information required for cleanups that have resolved
- /// branches through them. This has to be allocated on the side
- /// because everything on the cleanup stack has to be trivially
- /// movable.
- struct ExtInfo {
- /// The destinations of normal branch-afters and branch-throughs.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
-
- /// Normal branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- BranchAfters;
-
- /// The destinations of EH branch-afters and branch-throughs.
- /// TODO: optimize for the extremely common case of a single
- /// branch-through.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
-
- /// EH branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- EHBranchAfters;
- };
- mutable struct ExtInfo *ExtInfo;
-
- struct ExtInfo &getExtInfo() {
- if (!ExtInfo) ExtInfo = new struct ExtInfo();
- return *ExtInfo;
- }
-
- const struct ExtInfo &getExtInfo() const {
- if (!ExtInfo) ExtInfo = new struct ExtInfo();
- return *ExtInfo;
- }
-
-public:
- /// Gets the size required for a lazy cleanup scope with the given
- /// cleanup-data requirements.
- static size_t getSizeForCleanupSize(size_t Size) {
- return sizeof(EHCleanupScope) + Size;
- }
-
- size_t getAllocatedSize() const {
- return sizeof(EHCleanupScope) + CleanupSize;
- }
-
- EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
- unsigned CleanupSize, unsigned FixupDepth,
- EHScopeStack::stable_iterator EnclosingNormal,
- EHScopeStack::stable_iterator EnclosingEH)
- : EHScope(EHScope::Cleanup),
- IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
- ActivatedBeforeNormalUse(IsActive),
- ActivatedBeforeEHUse(IsActive),
- CleanupSize(CleanupSize), FixupDepth(FixupDepth),
- EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
- NormalBlock(0), EHBlock(0),
- ActiveVar(IsActive ? activeSentinel() : 0),
- ExtInfo(0)
- {
- assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
- }
-
- ~EHCleanupScope() {
- delete ExtInfo;
- }
-
- bool isNormalCleanup() const { return IsNormalCleanup; }
- llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
- void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
-
- bool isEHCleanup() const { return IsEHCleanup; }
- llvm::BasicBlock *getEHBlock() const { return EHBlock; }
- void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
-
- static llvm::AllocaInst *activeSentinel() {
- return reinterpret_cast<llvm::AllocaInst*>(1);
- }
-
- bool isActive() const { return ActiveVar != 0; }
- llvm::AllocaInst *getActiveVar() const { return ActiveVar; }
- void setActiveVar(llvm::AllocaInst *Var) { ActiveVar = Var; }
-
- bool wasActivatedBeforeNormalUse() const { return ActivatedBeforeNormalUse; }
- void setActivatedBeforeNormalUse(bool B) { ActivatedBeforeNormalUse = B; }
-
- bool wasActivatedBeforeEHUse() const { return ActivatedBeforeEHUse; }
- void setActivatedBeforeEHUse(bool B) { ActivatedBeforeEHUse = B; }
-
- unsigned getFixupDepth() const { return FixupDepth; }
- EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
- return EnclosingNormal;
- }
- EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
- return EnclosingEH;
- }
-
- size_t getCleanupSize() const { return CleanupSize; }
- void *getCleanupBuffer() { return this + 1; }
-
- EHScopeStack::Cleanup *getCleanup() {
- return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
- }
-
- /// True if this cleanup scope has any branch-afters or branch-throughs.
- bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
-
- /// Add a branch-after to this cleanup scope. A branch-after is a
- /// branch from a point protected by this (normal) cleanup to a
- /// point in the normal cleanup scope immediately containing it.
- /// For example,
- /// for (;;) { A a; break; }
- /// contains a branch-after.
- ///
- /// Branch-afters each have their own destination out of the
- /// cleanup, guaranteed distinct from anything else threaded through
- /// it. Therefore branch-afters usually force a switch after the
- /// cleanup.
- void addBranchAfter(llvm::ConstantInt *Index,
- llvm::BasicBlock *Block) {
- struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.Branches.insert(Block))
- ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
- }
-
- /// Return the number of unique branch-afters on this scope.
- unsigned getNumBranchAfters() const {
- return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
- }
-
- llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
- assert(I < getNumBranchAfters());
- return ExtInfo->BranchAfters[I].first;
- }
-
- llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
- assert(I < getNumBranchAfters());
- return ExtInfo->BranchAfters[I].second;
- }
-
- /// Add a branch-through to this cleanup scope. A branch-through is
- /// a branch from a scope protected by this (normal) cleanup to an
- /// enclosing scope other than the immediately-enclosing normal
- /// cleanup scope.
- ///
- /// In the following example, the branch through B's scope is a
- /// branch-through, while the branch through A's scope is a
- /// branch-after:
- /// for (;;) { A a; B b; break; }
- ///
- /// All branch-throughs have a common destination out of the
- /// cleanup, one possibly shared with the fall-through. Therefore
- /// branch-throughs usually don't force a switch after the cleanup.
- ///
- /// \return true if the branch-through was new to this scope
- bool addBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().Branches.insert(Block);
- }
-
- /// Determines if this cleanup scope has any branch throughs.
- bool hasBranchThroughs() const {
- if (!ExtInfo) return false;
- return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
- }
-
- // Same stuff, only for EH branches instead of normal branches.
- // It's quite possible that we could find a better representation
- // for this.
-
- bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
- void addEHBranchAfter(llvm::ConstantInt *Index,
- llvm::BasicBlock *Block) {
- struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.EHBranches.insert(Block))
- ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
- }
-
- unsigned getNumEHBranchAfters() const {
- return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
- }
-
- llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].first;
- }
-
- llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].second;
- }
-
- bool addEHBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().EHBranches.insert(Block);
- }
-
- bool hasEHBranchThroughs() const {
- if (!ExtInfo) return false;
- return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
- }
-
- static bool classof(const EHScope *Scope) {
- return (Scope->getKind() == Cleanup);
- }
-};
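
// Illustrative sketch (not part of this patch): the source-level shape of
// the two branch kinds distinguished in the comments above, assuming A and
// B have non-trivial destructors so each local gets its own normal cleanup
// scope.
struct A { ~A(); };
struct B { ~B(); };

void sketch_branches() {
  for (;;) {
    A a;       // outer cleanup scope
    B b;       // inner cleanup scope
    break;     // exits through b's scope toward a destination beyond b's
               // immediately-enclosing cleanup (a's): a branch-through;
               // then through a's scope to the point immediately
               // containing it (the loop exit): a branch-after.
  }
}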
-
-/// An exceptions scope which filters exceptions thrown through it.
-/// Only exceptions matching the filter types will be permitted to be
-/// thrown.
-///
-/// This is used to implement C++ exception specifications.
-class EHFilterScope : public EHScope {
- unsigned NumFilters : BitsRemaining;
-
- // Essentially ends in a flexible array member:
- // llvm::Value *FilterTypes[0];
-
- llvm::Value **getFilters() {
- return reinterpret_cast<llvm::Value**>(this+1);
- }
-
- llvm::Value * const *getFilters() const {
- return reinterpret_cast<llvm::Value* const *>(this+1);
- }
-
-public:
- EHFilterScope(unsigned NumFilters) :
- EHScope(Filter), NumFilters(NumFilters) {}
-
- static size_t getSizeForNumFilters(unsigned NumFilters) {
- return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
- }
-
- unsigned getNumFilters() const { return NumFilters; }
-
- void setFilter(unsigned I, llvm::Value *FilterValue) {
- assert(I < getNumFilters());
- getFilters()[I] = FilterValue;
- }
-
- llvm::Value *getFilter(unsigned I) const {
- assert(I < getNumFilters());
- return getFilters()[I];
- }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Filter;
- }
+ llvm::StringRef getPersonalityFnName() const { return PersonalityFn; }
+ llvm::StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
};
-/// An exceptions scope which calls std::terminate if any exception
-/// reaches it.
-class EHTerminateScope : public EHScope {
- unsigned DestIndex : BitsRemaining;
-public:
- EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
- static size_t getSize() { return sizeof(EHTerminateScope); }
-
- unsigned getDestIndex() const { return DestIndex; }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Terminate;
- }
-};
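
// Illustrative sketch (not part of this patch; modern C++ dialect): one
// source-level situation with terminate-on-unwind semantics -- a
// destructor that throws while another exception is already propagating
// must reach std::terminate() rather than continue unwinding.
struct Noisy {
  ~Noisy() noexcept(false) { throw 1; }
};
void sketch_terminate() {
  Noisy n;
  throw 0;   // ~Noisy() throws during unwinding -> std::terminate()
}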
-
-/// A non-stable pointer into the scope stack.
-class EHScopeStack::iterator {
- char *Ptr;
-
- friend class EHScopeStack;
- explicit iterator(char *Ptr) : Ptr(Ptr) {}
-
-public:
- iterator() : Ptr(0) {}
-
- EHScope *get() const {
- return reinterpret_cast<EHScope*>(Ptr);
- }
-
- EHScope *operator->() const { return get(); }
- EHScope &operator*() const { return *get(); }
-
- iterator &operator++() {
- switch (get()->getKind()) {
- case EHScope::Catch:
- Ptr += EHCatchScope::getSizeForNumHandlers(
- static_cast<const EHCatchScope*>(get())->getNumHandlers());
- break;
-
- case EHScope::Filter:
- Ptr += EHFilterScope::getSizeForNumFilters(
- static_cast<const EHFilterScope*>(get())->getNumFilters());
- break;
-
- case EHScope::Cleanup:
- Ptr += static_cast<const EHCleanupScope*>(get())
- ->getAllocatedSize();
- break;
-
- case EHScope::Terminate:
- Ptr += EHTerminateScope::getSize();
- break;
- }
-
- return *this;
- }
-
- iterator next() {
- iterator copy = *this;
- ++copy;
- return copy;
- }
-
- iterator operator++(int) {
- iterator copy = *this;
- operator++();
- return copy;
- }
-
- bool encloses(iterator other) const { return Ptr >= other.Ptr; }
- bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
-
- bool operator==(iterator other) const { return Ptr == other.Ptr; }
- bool operator!=(iterator other) const { return Ptr != other.Ptr; }
-};
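
// Illustrative sketch (hypothetical record layout, not clang's): the
// iterator above walks a packed buffer of variably-sized records by
// advancing a raw pointer by each record's allocated size, just as
// operator++ dispatches on the scope kind to compute that size.
#include <cstddef>

struct Header { unsigned Kind; std::size_t PayloadBytes; };

const Header *next(const Header *H) {
  const char *Raw = reinterpret_cast<const char *>(H);
  return reinterpret_cast<const Header *>(Raw + sizeof(Header) +
                                          H->PayloadBytes);
}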
-
-inline EHScopeStack::iterator EHScopeStack::begin() const {
- return iterator(StartOfData);
-}
-
-inline EHScopeStack::iterator EHScopeStack::end() const {
- return iterator(EndOfBuffer);
-}
-
-inline void EHScopeStack::popCatch() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHCatchScope>(*begin()));
- StartOfData += EHCatchScope::getSizeForNumHandlers(
- cast<EHCatchScope>(*begin()).getNumHandlers());
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
-}
-
-inline void EHScopeStack::popTerminate() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHTerminateScope>(*begin()));
- StartOfData += EHTerminateScope::getSize();
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
-}
-
-inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
- assert(sp.isValid() && "finding invalid savepoint");
- assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
- return iterator(EndOfBuffer - sp.Size);
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::stabilize(iterator ir) const {
- assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
- return stable_iterator(EndOfBuffer - ir.Ptr);
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveNormalCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingNormalCleanup();
- }
- return stable_end();
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveEHCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingEHCleanup();
- }
- return stable_end();
-}
-
}
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 3750ab80c3fc..1b7e7a007ed2 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -29,6 +29,18 @@ using namespace CodeGen;
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
+llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
+ unsigned addressSpace =
+ cast<llvm::PointerType>(value->getType())->getAddressSpace();
+
+ const llvm::PointerType *destType = Int8PtrTy;
+ if (addressSpace)
+ destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
+
+ if (value->getType() == destType) return value;
+ return Builder.CreateBitCast(value, destType);
+}
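
// Usage sketch (assumes the clang-internal headers; 'gepBytes' is a
// hypothetical helper, not in this patch): the point of EmitCastToVoidPtr
// is that the i8* it yields keeps the source pointer's address space,
// which the byte-offset GEPs in the bitfield paths below rely on.
static llvm::Value *gepBytes(CodeGenFunction &CGF, llvm::Value *Ptr,
                             unsigned ByteOffset) {
  llvm::Value *Raw = CGF.EmitCastToVoidPtr(Ptr);   // i8* in Ptr's AS
  return CGF.Builder.CreateConstGEP1_32(Raw, ByteOffset, "byte.offs");
}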
+
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
@@ -68,7 +80,7 @@ llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
llvm::Value *MemPtr = EmitScalarExpr(E);
- return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
}
QualType BoolTy = getContext().BoolTy;
@@ -78,35 +90,40 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}
-/// EmitAnyExpr - Emit code to compute the specified expression which can have
-/// any type. The result is returned as an RValue struct. If this is an
-/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
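
// Illustrative sketch (not part of this patch): the source-level cases
// the new entry point routes.
void sketch_ignored(int x) {
  x;        // glvalue: emitted via EmitLValue, result dropped
  x + 1;    // prvalue: EmitAnyExpr into an ignored slot
}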
+
+/// EmitAnyExpr - Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
- bool IsAggLocVolatile, bool IgnoreResult,
- bool IsInitializer) {
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
+ bool IgnoreResult) {
if (!hasAggregateLLVMType(E->getType()))
return RValue::get(EmitScalarExpr(E, IgnoreResult));
else if (E->getType()->isAnyComplexType())
- return RValue::getComplex(EmitComplexExpr(E, false, false,
- IgnoreResult, IgnoreResult));
+ return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
- EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
- return RValue::getAggregate(AggLoc, IsAggLocVolatile);
+ EmitAggExpr(E, AggSlot, IgnoreResult);
+ return AggSlot.asRValue();
}
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
/// always be accessible even if no aggregate location is provided.
-RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
- bool IsAggLocVolatile,
- bool IsInitializer) {
- llvm::Value *AggLoc = 0;
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
+ AggValueSlot AggSlot = AggValueSlot::ignored();
if (hasAggregateLLVMType(E->getType()) &&
!E->getType()->isAnyComplexType())
- AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
- return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
- IsInitializer);
+ AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggSlot);
}
/// EmitAnyExprToMem - Evaluate an expression into a given memory
@@ -118,7 +135,7 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
if (E->getType()->isComplexType())
EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
@@ -126,34 +143,34 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
}
}
+namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
-struct SubobjectAdjustment {
- enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
-
- union {
- struct {
- const CastExpr *BasePath;
- const CXXRecordDecl *DerivedClass;
- } DerivedToBase;
-
- FieldDecl *Field;
+ struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment) {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment) {
+ this->Field = Field;
+ }
};
-
- SubobjectAdjustment(const CastExpr *BasePath,
- const CXXRecordDecl *DerivedClass)
- : Kind(DerivedToBaseAdjustment)
- {
- DerivedToBase.BasePath = BasePath;
- DerivedToBase.DerivedClass = DerivedClass;
- }
-
- SubobjectAdjustment(FieldDecl *Field)
- : Kind(FieldAdjustment)
- {
- this->Field = Field;
- }
-};
+}
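
// Illustrative sketch (not part of this patch): source-level bindings
// that record each adjustment kind while extending the temporary's life.
struct SketchBase { int x; };
struct SketchDerived : SketchBase {};
SketchDerived makeSketch();

const SketchBase &sb = makeSketch();     // DerivedToBaseAdjustment
const int       &si = makeSketch().x;    // FieldAdjustment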
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
@@ -161,8 +178,10 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
llvm::SmallString<256> Name;
- CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Name);
-
+ llvm::raw_svector_ostream Out(Name);
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
+ Out.flush();
+
const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
// Create the reference temporary.
@@ -180,14 +199,14 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
}
static llvm::Value *
-EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
+EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
const NamedDecl *InitializedDecl) {
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
- if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
+ if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
@@ -197,10 +216,9 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
RValue RV;
- if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
+ if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
-
if (LV.isSimple())
return LV.getAddress();
@@ -232,8 +250,8 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
continue;
}
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
- ME->getBase()->getType()->isRecordType()) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ assert(ME->getBase()->getType()->isRecordType());
if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
E = ME->getBase();
Adjustments.push_back(SubobjectAdjustment(Field));
@@ -247,13 +265,16 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
// Create a reference temporary if necessary.
+ AggValueSlot AggSlot = AggValueSlot::ignored();
if (CGF.hasAggregateLLVMType(E->getType()) &&
- !E->getType()->isAnyComplexType())
+ !E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
+ InitializedDecl != 0);
+ }
- RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false,
- /*IgnoreResult=*/false, InitializedDecl);
+ RV = CGF.EmitAnyExpr(E, AggSlot);
if (InitializedDecl) {
// Get the destructor for the reference temporary.
@@ -328,7 +349,7 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
RValue
-CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
@@ -343,8 +364,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
if (VD->hasGlobalStorage()) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
- CGF.EmitCXXGlobalDtorRegistration(DtorFn,
- cast<llvm::Constant>(ReferenceTemporary));
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
return RValue::get(Value);
}
@@ -370,14 +391,14 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
if (!CatchUndefined)
return;
- Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
+ // This needs to be to the standard address space.
+ Address = Builder.CreateBitCast(Address, Int8PtrTy);
const llvm::Type *IntPtrT = IntPtrTy;
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
- const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
// In time, people may want to control this and use a 1 here.
- llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
+ llvm::Value *Arg = Builder.getFalse();
llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
llvm::BasicBlock *Cont = createBasicBlock();
llvm::BasicBlock *Check = createBasicBlock();
@@ -468,7 +489,8 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
LValue LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
+ EmitCheck(LV.getAddress(),
+ getContext().getTypeSizeInChars(E->getType()).getQuantity());
return LV;
}
@@ -498,7 +520,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
case Expr::CompoundAssignOperatorClass:
- return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+ if (!E->getType()->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
@@ -523,8 +547,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
- case Expr::CXXExprWithTemporariesClass:
- return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
+ case Expr::ExprWithCleanupsClass:
+ return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
case Expr::CXXScalarValueInitExprClass:
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
@@ -538,11 +562,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
case Expr::ObjCPropertyRefExprClass:
return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
- case Expr::ObjCImplicitSetterGetterRefExprClass:
- return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
- case Expr::ObjCSuperExprClass:
- return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
-
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
@@ -557,8 +576,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
case Expr::ConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::BinaryConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::OpaqueValueExprClass:
+ return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -571,35 +594,58 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty) {
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo) {
llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
if (Volatile)
Load->setVolatile(true);
if (Alignment)
Load->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Load, TBAAInfo);
- // Bool can have different representation in memory than in registers.
- llvm::Value *V = Load;
- if (Ty->isBooleanType())
- if (V->getType() != llvm::Type::getInt1Ty(VMContext))
- V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
+ return EmitFromMemory(Load, Ty);
+}
- return V;
+static bool isBooleanUnderlyingType(QualType Ty) {
+ if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+ return false;
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment,
- QualType Ty) {
+llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ // This should really always be an i1, but sometimes it's already
+ // an i8, and it's awkward to track those cases down.
+ if (Value->getType()->isIntegerTy(1))
+ return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
+ assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
+ }
- if (Ty->isBooleanType()) {
- // Bool can have different representation in memory than in registers.
- const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
- Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
+ return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
+ return Value;
+}
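
// Standalone model (not part of this patch) of the i1/i8 convention the
// two helpers above implement: a bool is a 1-bit value in registers but
// is widened to a byte in memory.
#include <cstdint>

uint8_t boolToMemory(bool b) { return b ? 1 : 0; }   // zext i1 -> i8
bool boolFromMemory(uint8_t v) { return v & 1; }     // trunc i8 -> i1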
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment,
+ QualType Ty,
+ llvm::MDNode *TBAAInfo) {
+ Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (Alignment)
Store->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Store, TBAAInfo);
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -622,7 +668,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
// Everything needs a load.
return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
- LV.getAlignment(), ExprType));
+ LV.getAlignment(), ExprType,
+ LV.getTBAAInfo()));
}
@@ -641,11 +688,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
if (LV.isBitField())
return EmitLoadOfBitfieldLValue(LV, ExprType);
- if (LV.isPropertyRef())
- return EmitLoadOfPropertyRefLValue(LV, ExprType);
-
- assert(LV.isKVCRef() && "Unknown LValue type!");
- return EmitLoadOfKVCRefLValue(LV, ExprType);
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return EmitLoadOfPropertyRefLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
@@ -671,13 +715,13 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Offset by the byte offset, if used.
if (AI.FieldByteOffset) {
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
- Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = EmitCastToVoidPtr(Ptr);
Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
+ AI.AccessWidth,
ExprType.getAddressSpace());
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -720,16 +764,6 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
return RValue::get(Res);
}
-RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
- QualType ExprType) {
- return EmitObjCPropertyGet(LV.getPropertyRefExpr());
-}
-
-RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
- QualType ExprType) {
- return EmitObjCPropertyGet(LV.getKVCRefExpr());
-}
-
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
@@ -757,9 +791,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
- Vec = Builder.CreateShuffleVector(Vec,
- llvm::UndefValue::get(Vec->getType()),
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
MaskV, "tmp");
return RValue::get(Vec);
}
@@ -790,11 +823,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isBitField())
return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
- if (Dst.isPropertyRef())
- return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
-
- assert(Dst.isKVCRef() && "Unknown LValue type");
- return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+ assert(Dst.isPropertyRef() && "Unknown LValue type");
+ return EmitStoreThroughPropertyRefLValue(Src, Dst);
}
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
@@ -831,7 +861,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
assert(Src.isScalar() && "Can't emit an agg store with this method");
EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
- Dst.isVolatileQualified(), Dst.getAlignment(), Ty);
+ Dst.isVolatileQualified(), Dst.getAlignment(), Ty,
+ Dst.getTBAAInfo());
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -877,6 +908,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the field pointer.
llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+ unsigned addressSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
// Only offset by the field index if used, so that incoming values are not
// required to be structures.
@@ -885,14 +918,15 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Offset by the byte offset, if used.
if (AI.FieldByteOffset) {
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
- Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = EmitCastToVoidPtr(Ptr);
Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
- Ty.getAddressSpace());
+ const llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
+
+ const llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Extract the piece of the bit-field value to write in this access, limited
@@ -904,8 +938,6 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
AI.TargetBitWidth));
// Extend or truncate to the access size.
- const llvm::Type *AccessLTy =
- llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
if (ResSizeInBits < AI.AccessWidth)
Val = Builder.CreateZExt(Val, AccessLTy);
else if (ResSizeInBits > AI.AccessWidth)
@@ -938,18 +970,6 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
}
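
// Standalone model (not part of this patch) of the read-modify-write the
// store path above performs per access unit: load the unit, clear the
// field's bits, merge the new bits, store back. 'shift'/'width' stand in
// for the AccessInfo fields such as AI.TargetBitWidth.
#include <cassert>
#include <cstdint>

uint32_t storeBitfieldUnit(uint32_t unit, uint32_t value,
                           unsigned shift, unsigned width) {
  assert(width < 32 && shift + width <= 32);
  uint32_t mask = ((1u << width) - 1u) << shift;
  return (unit & ~mask) | ((value << shift) & mask);
}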
-void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
- EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
-}
-
-void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
- EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
-}
-
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst,
QualType Ty) {
@@ -975,7 +995,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
MaskV, "tmp");
@@ -990,8 +1010,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
for (; i != NumDstElts; ++i)
ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
- llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
- ExtMask.size());
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
@@ -1006,7 +1025,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
unsigned Idx = getAccessedFieldNo(i, Elts);
Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
} else {
// We should never shorten the vector
@@ -1040,8 +1059,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
- if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
- VD->isFileVarDecl()) {
+ if (VD->hasGlobalStorage()) {
LV.setGlobalObjCRef(true);
LV.setThreadLocalRef(VD->isThreadSpecified());
}
@@ -1116,7 +1134,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
const Expr *E, const FunctionDecl *FD) {
- llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
+ llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -1135,10 +1153,10 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- unsigned Alignment = CGF.getContext().getDeclAlign(ND).getQuantity();
+ unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
if (ND->hasAttr<WeakRefAttr>()) {
- const ValueDecl* VD = cast<ValueDecl>(ND);
+ const ValueDecl *VD = cast<ValueDecl>(ND);
llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
return MakeAddrLValue(Aliasee, E->getType(), Alignment);
}
@@ -1149,20 +1167,18 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->hasExternalStorage() || VD->isFileVarDecl())
return EmitGlobalVarDeclLValue(*this, E, VD);
- bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+ bool NonGCable = VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType() &&
+ !VD->hasAttr<BlocksAttr>();
llvm::Value *V = LocalDeclMap[VD];
- if (!V && getContext().getLangOptions().CPlusPlus &&
- VD->isStaticLocal())
+ if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (VD->hasAttr<BlocksAttr>()) {
- V = Builder.CreateStructGEP(V, 1, "forwarding");
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
- VD->getNameAsString());
- }
+ if (VD->hasAttr<BlocksAttr>())
+ V = BuildBlockByrefAddress(V, VD);
+
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
@@ -1174,24 +1190,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
-
- // If we're emitting an instance method as an independent lvalue,
- // we're actually emitting a member pointer.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isInstance()) {
- llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(MD);
- return MakeAddrLValue(V, MD->getType(), Alignment);
- }
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
- return EmitFunctionDeclLValue(*this, E, FD);
-
- // If we're emitting a field as an independent lvalue, we're
- // actually emitting a member pointer.
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(ND)) {
- llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(FD);
- return MakeAddrLValue(V, FD->getType(), Alignment);
- }
-
+
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, fn);
+
assert(false && "Unhandled DeclRefExpr");
// an invalid LValue, but the assert will
@@ -1201,7 +1203,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
unsigned Alignment =
- CGF.getContext().getDeclAlign(E->getDecl()).getQuantity();
+ getContext().getDeclAlign(E->getDecl()).getQuantity();
return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
@@ -1233,9 +1235,22 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
case UO_Real:
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+ llvm::Value *Addr = LV.getAddress();
+
+ // real and imag are valid on scalars. This is a faster way of
+ // testing that.
+ if (!cast<llvm::PointerType>(Addr->getType())
+ ->getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
unsigned Idx = E->getOpcode() == UO_Imag;
return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
- Idx, "idx"),
+ Idx, "idx"),
ExprTy);
}
case UO_PreInc:
@@ -1297,7 +1312,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
CurDecl = getContext().getTranslationUnitDecl();
std::string FunctionName =
- PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl);
+ (isa<BlockDecl>(CurDecl)
+ ? FnName.str()
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
llvm::Constant *C =
CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
@@ -1363,15 +1380,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vidx");
+ Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType().getCVRQualifiers());
}
// Extend or truncate the index type to 32 or 64-bits.
- if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
- Idx = Builder.CreateIntCast(Idx, IntPtrTy,
- IdxSigned, "idxprom");
+ if (Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
// FIXME: Once llvm implements the object size checking, this can come out.
if (CatchUndefined) {
@@ -1401,17 +1417,12 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, VLASize);
- QualType BaseType = getContext().getBaseElementType(VAT);
-
- CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
- Idx = Builder.CreateUDiv(Idx,
- llvm::ConstantInt::get(Idx->getType(),
- BaseTypeSize.getQuantity()));
-
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
-
- Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
@@ -1420,12 +1431,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
-
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
- Idx, "arrayidx");
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
@@ -1469,7 +1478,7 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
- return llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return llvm::ConstantVector::get(CElts);
}
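
// Sketch of the call-site rewrite repeated through this file (assuming
// the LLVM API of this era): ConstantVector::get now takes the element
// container directly instead of a pointer/length pair.
//   before: llvm::ConstantVector::get(&CElts[0], CElts.size());
//   after:  llvm::ConstantVector::get(CElts);
llvm::SmallVector<llvm::Constant *, 4> Elts;   // populated by the caller
llvm::Constant *CV = llvm::ConstantVector::get(Elts);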
LValue CodeGenFunction::
@@ -1485,7 +1494,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
Base = MakeAddrLValue(Ptr, PT->getPointeeType());
Base.getQuals().removeObjCGCAttr();
- } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
+ } else if (E->getBase()->isGLValue()) {
// Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
// emit the base as an lvalue.
assert(E->getBase()->getType()->isVectorType());
@@ -1507,7 +1516,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
+ llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV,
Base.getVRQualifiers());
}
@@ -1522,7 +1531,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
else
CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
}
- llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ llvm::Constant *CV = llvm::ConstantVector::get(CElts);
return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
Base.getVRQualifiers());
}
@@ -1539,12 +1548,6 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
const PointerType *PTy =
BaseExpr->getType()->getAs<PointerType>();
BaseQuals = PTy->getPointeeType().getQualifiers();
- } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
- isa<ObjCImplicitSetterGetterRefExpr>(
- BaseExpr->IgnoreParens())) {
- RValue RV = EmitObjCPropertyGet(BaseExpr);
- BaseValue = RV.getAggregateAddr();
- BaseQuals = BaseExpr->getType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
if (BaseLV.isNonGC())
@@ -1574,8 +1577,8 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
return LValue();
}
-LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
- const FieldDecl* Field,
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(Field->getParent());
@@ -1589,23 +1592,13 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
/// that the base value is a pointer to the enclosing record, derive
/// an lvalue for the ultimate field.
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
- const FieldDecl *Field,
+ const IndirectFieldDecl *Field,
unsigned CVRQualifiers) {
- llvm::SmallVector<const FieldDecl *, 8> Path;
- Path.push_back(Field);
-
- while (Field->getParent()->isAnonymousStructOrUnion()) {
- const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
- if (!isa<FieldDecl>(VD)) break;
- Field = cast<FieldDecl>(VD);
- Path.push_back(Field);
- }
-
- llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
- I = Path.rbegin(), E = Path.rend();
+ IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
+ IEnd = Field->chain_end();
while (true) {
- LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
- if (++I == E) return LV;
+ LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), CVRQualifiers);
+ if (++I == IEnd) return LV;
assert(LV.isSimple());
BaseValue = LV.getAddress();
@@ -1613,8 +1606,8 @@ LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
}
}
-LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
- const FieldDecl* Field,
+LValue CodeGenFunction::EmitLValueForField(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
if (Field->isBitField())
return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
@@ -1628,7 +1621,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
if (Field->getParent()->isUnion()) {
const llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(Field->getType());
- const llvm::PointerType * BaseTy =
+ const llvm::PointerType *BaseTy =
cast<llvm::PointerType>(BaseValue->getType());
unsigned AS = BaseTy->getAddressSpace();
V = Builder.CreateBitCast(V,
@@ -1650,8 +1643,8 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
}
LValue
-CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
- const FieldDecl* Field,
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
QualType FieldType = Field->getType();
@@ -1669,71 +1662,74 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
return MakeAddrLValue(V, FieldType, Alignment);
}
-LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
- const Expr* InitExpr = E->getInitializer();
+ const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
- EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
+ EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
return Result;
}
-LValue
-CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
- if (E->isLvalue(getContext()) == Expr::LV_Valid) {
- if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
- Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
- if (Live)
- return EmitLValue(Live);
- }
+LValue CodeGenFunction::
+EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(expr->getType()) &&
+ !expr->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
- if (!E->getLHS())
- return EmitUnsupportedLValue(E, "conditional operator with missing LHS");
+ const Expr *condExpr = expr->getCond();
- llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
- llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
-
- EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ if (int condValue = ConstantFoldsToSimpleInteger(condExpr)) {
+ const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
+ if (condValue == -1) std::swap(live, dead);
+
+ if (!ContainsLabel(dead))
+ return EmitLValue(live);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+
+ llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
- // Any temporaries created here are conditional.
- BeginConditionalBranch();
- EmitBlock(LHSBlock);
- LValue LHS = EmitLValue(E->getLHS());
- EndConditionalBranch();
+ // Any temporaries created here are conditional.
+ EmitBlock(lhsBlock);
+ eval.begin(*this);
+ LValue lhs = EmitLValue(expr->getTrueExpr());
+ eval.end(*this);
- if (!LHS.isSimple())
- return EmitUnsupportedLValue(E, "conditional operator");
+ if (!lhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
- // FIXME: We shouldn't need an alloca for this.
- llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
- Builder.CreateStore(LHS.getAddress(), Temp);
- EmitBranch(ContBlock);
-
- // Any temporaries created here are conditional.
- BeginConditionalBranch();
- EmitBlock(RHSBlock);
- LValue RHS = EmitLValue(E->getRHS());
- EndConditionalBranch();
- if (!RHS.isSimple())
- return EmitUnsupportedLValue(E, "conditional operator");
-
- Builder.CreateStore(RHS.getAddress(), Temp);
- EmitBranch(ContBlock);
-
- EmitBlock(ContBlock);
+ lhsBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(contBlock);
- Temp = Builder.CreateLoad(Temp, "lv");
- return MakeAddrLValue(Temp, E->getType());
- }
-
- // ?: here should be an aggregate.
- assert((hasAggregateLLVMType(E->getType()) &&
- !E->getType()->isAnyComplexType()) &&
- "Unexpected conditional operator!");
+ // Any temporaries created here are conditional.
+ EmitBlock(rhsBlock);
+ eval.begin(*this);
+ LValue rhs = EmitLValue(expr->getFalseExpr());
+ eval.end(*this);
+ if (!rhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+ rhsBlock = Builder.GetInsertBlock();
- return EmitAggExprToLValue(E);
+ EmitBlock(contBlock);
+
+ llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(),
+ "cond-lvalue");
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(lhs.getAddress(), lhsBlock);
+ phi->addIncoming(rhs.getAddress(), rhsBlock);
+ return MakeAddrLValue(phi, expr->getType());
}
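
// Illustrative sketch (not part of this patch; block names taken from the
// code above, phi name assumed): a glvalue conditional now emits each arm
// as an l-value in its own block and joins the chosen address with a phi,
// replacing the old alloca round-trip.
int sa, sb;
void sketch_cond_lvalue(bool c, int v) {
  (c ? sa : sb) = v;
  //   cond.true:   address of sa
  //   cond.false:  address of sb
  //   cond.end:    %addr = phi i32* [ @sa, %cond.true ],
  //                                 [ @sb, %cond.false ]
  //                store i32 %v, i32* %addr
}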
/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
@@ -1747,36 +1743,57 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_ToVoid:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
-
- case CK_NoOp:
- if (E->getSubExpr()->Classify(getContext()).getKind()
- != Expr::Classification::CL_PRValue) {
- LValue LV = EmitLValue(E->getSubExpr());
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getSubExpr()->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert(!RV.isScalar() && "EmitCastLValue-scalar cast of property ref");
- llvm::Value *V = RV.getAggregateAddr();
- return MakeAddrLValue(V, QT);
- }
- return LV;
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_GetObjCProperty: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isPropertyRef());
+ RValue RV = EmitLoadOfPropertyRefLValue(LV);
+
+ // Property is an aggregate r-value.
+ if (RV.isAggregate()) {
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}
+
+ // Implicit property returns an l-value.
+ assert(RV.isScalar());
+ return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
+ }
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ if (!E->getSubExpr()->Classify(getContext()).isPRValue()
+ || E->getType()->isRecordType())
+ return EmitLValue(E->getSubExpr());
// Fall through to synthesize a temporary.
-
- case CK_Unknown:
+
case CK_BitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
+ case CK_NullToPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
case CK_VectorSplat:
case CK_IntegralCast:
+ case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
@@ -1810,17 +1827,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getSubExpr()->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert (!RV.isScalar() && "EmitCastLValue");
- This = RV.getAggregateAddr();
- }
- else
- This = LV.getAddress();
+ llvm::Value *This = LV.getAddress();
// Perform the derived-to-base conversion
llvm::Value *Base =
@@ -1876,6 +1883,11 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
return LV;
}
+LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(e->isGLValue() || e->getType()->isRecordType());
+ return getOpaqueLValueMapping(e);
+}
+
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
@@ -1922,7 +1934,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
// Comma expressions just emit their LHS then their RHS as an l-value.
if (E->getOpcode() == BO_Comma) {
- EmitAnyExpr(E->getLHS());
+ EmitIgnoredExpr(E->getLHS());
EnsureInsertPoint();
return EmitLValue(E->getRHS());
}
@@ -1930,20 +1942,20 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemD ||
E->getOpcode() == BO_PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
-
- // Can only get l-value for binary operator expressions which are a
- // simple assignment of aggregate type.
- if (E->getOpcode() != BO_Assign)
- return EmitUnsupportedLValue(E, "binary l-value expression");
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
if (!hasAggregateLLVMType(E->getType())) {
- // Emit the LHS as an l-value.
+ // __block variables need the RHS evaluated first.
+ RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitLValue(E->getLHS());
- // Store the value through the l-value.
- EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType());
+ EmitStoreThroughLValue(RV, LV, E->getType());
return LV;
}
-
+
+ if (E->getType()->isAnyComplexType())
+ return EmitComplexAssignmentLValue(E);
+
return EmitAggExprToLValue(E);
}
@@ -1966,9 +1978,11 @@ LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
}
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
- llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
- EmitCXXConstructExpr(Temp, E);
- return MakeAddrLValue(Temp, E->getType());
+ assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
+ && "binding l-value to type which needs a temporary");
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "tmp");
+ EmitCXXConstructExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
}
LValue
@@ -1978,9 +1992,11 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
- LValue LV = EmitLValue(E->getSubExpr());
- EmitCXXTemporary(E->getTemporary(), LV.getAddress());
- return LV;
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ Slot.setLifetimeExternallyManaged();
+ EmitAggExpr(E->getSubExpr(), Slot);
+ EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
@@ -2040,24 +2056,6 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
return LV;
}
-LValue
-CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
- // This is a special l-value that just issues sends when we load or store
- // through it.
- return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
-}
-
-LValue CodeGenFunction::EmitObjCKVCRefLValue(
- const ObjCImplicitSetterGetterRefExpr *E) {
- // This is a special l-value that just issues sends when we load or store
- // through it.
- return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
-}
-
-LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
- return EmitUnsupportedLValue(E, "use of super");
-}
-
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
@@ -2078,7 +2076,6 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const FunctionType *FnType
= cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
- QualType ResultType = FnType->getResultType();
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
@@ -2105,4 +2102,3 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
return MakeAddrLValue(AddV, MPT->getPointeeType());
}
-
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 28c8b3545b38..f992dc7c9cb9 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -32,27 +32,28 @@ namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
- llvm::Value *DestPtr;
- bool VolatileDest;
+ AggValueSlot Dest;
bool IgnoreResult;
- bool IsInitializer;
- bool RequiresGCollection;
ReturnValueSlot getReturnValueSlot() const {
// If the destination slot requires garbage collection, we can't
// use the real return value slot, because we have to use the GC
// API.
- if (RequiresGCollection) return ReturnValueSlot();
+ if (Dest.requiresGCollection()) return ReturnValueSlot();
- return ReturnValueSlot(DestPtr, VolatileDest);
+ return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
+ }
+
+ AggValueSlot EnsureSlot(QualType T) {
+ if (!Dest.isIgnored()) return Dest;
+ return CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
public:
- AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
- bool ignore, bool isinit, bool requiresGCollection)
- : CGF(cgf), Builder(CGF.Builder),
- DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
- IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+ IgnoreResult(ignore) {
}
//===--------------------------------------------------------------------===//
@@ -114,9 +115,8 @@ public:
EmitAggLoadOfLValue(E);
}
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
- void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
@@ -125,10 +125,12 @@ public:
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
- void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
void VisitVAArgExpr(VAArgExpr *E);
void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
@@ -176,13 +178,13 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// directly into the return value slot. If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (RequiresGCollection) {
+ if (Dest.requiresGCollection()) {
std::pair<uint64_t, unsigned> TypeInfo =
CGF.getContext().getTypeInfo(E->getType());
unsigned long size = TypeInfo.first/8;
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
Src.getAggregateAddr(),
SizeVal);
}
@@ -192,13 +194,13 @@ void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
assert(Src.isAggregate() && "value must be aggregate value!");
- // If DestPtr is null, then we're evaluating an aggregate expression
+ // If Dest is ignored, then we're evaluating an aggregate expression
// in a context (like an expression statement) that doesn't care
// about the result. C says that an lvalue-to-rvalue conversion is
// performed in these cases; C++ says that it is not. In either
// case, we don't actually need to do anything unless the value is
// volatile.
- if (DestPtr == 0) {
+ if (Dest.isIgnored()) {
if (!Src.isVolatileQualified() ||
CGF.CGM.getLangOptions().CPlusPlus ||
(IgnoreResult && Ignore))
@@ -206,26 +208,27 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// If the source is volatile, we must read from it; to do that, we need
// some place to put it.
- DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
+ Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
}
- if (RequiresGCollection) {
+ if (Dest.requiresGCollection()) {
std::pair<uint64_t, unsigned> TypeInfo =
CGF.getContext().getTypeInfo(E->getType());
unsigned long size = TypeInfo.first/8;
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- DestPtr, Src.getAggregateAddr(),
- SizeVal);
+ Dest.getAddr(),
+ Src.getAggregateAddr(),
+ SizeVal);
return;
}
// If the result of the assignment is used, copy the LHS there also.
// FIXME: Pass VolatileDest as well. I think we also need to merge volatile
// from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
- CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
- VolatileDest|Src.isVolatileQualified());
+ CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
+ Dest.isVolatile()|Src.isVolatileQualified());
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
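The C/C++ divergence described in the comment above is easiest to see with a volatile aggregate in an expression statement; a small illustrative example:

    struct S { int x; };
    volatile S s;

    void f() {
      s;   // C99: an lvalue-to-rvalue conversion occurs, so the volatile
           // object must be read.  C++03: no conversion is applied to a
           // discarded-value expression, so no load need be emitted.
    }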
@@ -241,15 +244,17 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
// Visitor Methods
//===----------------------------------------------------------------------===//
+void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+}
+
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
- if (!DestPtr && E->getCastKind() != CK_Dynamic) {
+ if (Dest.isIgnored() && E->getCastKind() != CK_Dynamic) {
Visit(E->getSubExpr());
return;
}
switch (E->getCastKind()) {
- default: assert(0 && "Unhandled cast kind!");
-
case CK_Dynamic: {
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
@@ -259,8 +264,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
- if (DestPtr)
- CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ if (!Dest.isIgnored())
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
break;
}
@@ -268,7 +273,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
QualType PtrTy = CGF.getContext().getPointerType(Ty);
- llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
+ llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
CGF.ConvertType(PtrTy));
EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
Ty);
@@ -283,8 +288,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
- // FIXME: Remove the CK_Unknown check here.
- case CK_Unknown:
+ case CK_GetObjCProperty: {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ assert(LV.isPropertyRef());
+ RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
+ EmitGCMove(E, RV);
+ break;
+ }
+
+ case CK_LValueToRValue: // hope for downstream optimization
case CK_NoOp:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
@@ -293,10 +305,45 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"Implicit cast types must be compatible");
Visit(E->getSubExpr());
break;
-
+
case CK_LValueBitCast:
- llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
break;
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -316,24 +363,18 @@ void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
}
void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
- EmitGCMove(E, RV);
-}
-
-void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ llvm_unreachable("direct property access not surrounded by "
+ "lvalue-to-rvalue cast");
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
- CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
- /*IgnoreResult=*/false, IsInitializer);
+ CGF.EmitIgnoredExpr(E->getLHS());
+ Visit(E->getRHS());
}
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
- CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
@@ -355,64 +396,62 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType())
&& "Invalid assignment");
+
+ // FIXME: __block variables need the RHS evaluated first!
LValue LHS = CGF.EmitLValue(E->getLHS());
  // We have to special-case property setters; otherwise we must have
  // a simple lvalue (no aggregates inside vectors or bitfields).
if (LHS.isPropertyRef()) {
- llvm::Value *AggLoc = DestPtr;
- if (!AggLoc)
- AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
- RValue::getAggregate(AggLoc, VolatileDest));
- } else if (LHS.isKVCRef()) {
- llvm::Value *AggLoc = DestPtr;
- if (!AggLoc)
- AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
- RValue::getAggregate(AggLoc, VolatileDest));
+ AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), Slot);
+ CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
} else {
- bool RequiresGCollection = false;
+ bool GCollection = false;
if (CGF.getContext().getLangOptions().getGCMode())
- RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());
+ GCollection = TypeRequiresGCollection(E->getLHS()->getType());
// Codegen the RHS so that it stores directly into the LHS.
- CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
- false, false, RequiresGCollection);
+ AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
+ GCollection);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
EmitFinalDestCopy(E, LHS, true);
}
}
-void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
- if (!E->getLHS()) {
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- return;
- }
-
+void AggExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
- CGF.BeginConditionalBranch();
+ // Save whether the destination's lifetime is externally managed.
+ bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();
+
+ eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
+ Visit(E->getTrueExpr());
+ eval.end(CGF);
- // Handle the GNU extension for missing LHS.
- assert(E->getLHS() && "Must have LHS for aggregate value");
+ assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
+ CGF.Builder.CreateBr(ContBlock);
- Visit(E->getLHS());
- CGF.EndConditionalBranch();
- CGF.EmitBranch(ContBlock);
+ // If the result of an agg expression is unused, then the emission
+ // of the LHS might need to create a destination slot. That's fine
+ // with us, and we can safely emit the RHS into the same slot, but
+ // we shouldn't claim that its lifetime is externally managed.
+ Dest.setLifetimeExternallyManaged(DestLifetimeManaged);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
-
- Visit(E->getRHS());
- CGF.EndConditionalBranch();
- CGF.EmitBranch(ContBlock);
+ Visit(E->getFalseExpr());
+ eval.end(CGF);
CGF.EmitBlock(ContBlock);
}
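Concretely, both arms of an aggregate conditional are now emitted into the same destination slot, with ConditionalEvaluation scoping each arm and OpaqueValueMapping covering the GNU ?: form. An illustrative example, assuming helper functions zero() and one():

    struct Big { int a[8]; };
    Big zero();
    Big one();

    Big pick(bool cond) {
      // Each arm's call writes straight into the return slot; only the
      // taken branch runs, so no extra temporary or final copy is needed.
      return cond ? zero() : one();
    }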
@@ -434,65 +473,78 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
-
- // FIXME: volatile
- CGF.EmitAggExpr(E->getSubExpr(), Val, false);
- } else
- Visit(E->getSubExpr());
-
- // Don't make this a live temporary if we're emitting an initializer expr.
- if (!IsInitializer)
- CGF.EmitCXXTemporary(E->getTemporary(), Val);
+ // Ensure that we have a slot, but if we already do, remember
+ // whether its lifetime was externally managed.
+ bool WasManaged = Dest.isLifetimeExternallyManaged();
+ Dest = EnsureSlot(E->getType());
+ Dest.setLifetimeExternallyManaged();
+
+ Visit(E->getSubExpr());
+
+ // Set up the temporary's destructor if its lifetime wasn't already
+ // being managed.
+ if (!WasManaged)
+ CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
-
- CGF.EmitCXXConstructExpr(Val, E);
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitCXXConstructExpr(E, Slot);
}
-void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- llvm::Value *Val = DestPtr;
-
- CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.EmitExprWithCleanups(E, Dest);
}
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
- E->getType());
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
- llvm::Value *Val = DestPtr;
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+}
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
- E->getType());
+/// isSimpleZero - If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
+ // (0)
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isSimpleZero(PE->getSubExpr(), CGF);
+ // 0
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return IL->getValue() == 0;
+ // +0.0
+ if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+ return FL->getValue().isPosZero();
+ // int()
+ if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ return true;
+ // (int*)0 - Null pointer expressions.
+ if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
+ return ICE->getCastKind() == CK_NullToPointer;
+ // '\0'
+ if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+ return CL->getValue() == 0;
+
+ // Otherwise, hard case: conservatively return false.
+ return false;
}
+
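A few initializers and how the predicate classifies them, as a sketch:

    struct T { int i; float f; char c; int *p; };
    // Every element here is matched by isSimpleZero (zero int literal,
    // positive-zero float literal, '\0', null pointer cast), so when the
    // destination is already zeroed no stores are emitted for them:
    T t1 = { 0, 0.0f, '\0', (int*)0 };
    // 1 is plainly non-zero, and -0.0f is a unary minus over a literal,
    // which the predicate conservatively rejects; both still get stores:
    T t2 = { 1, -0.0f, '\0', 0 };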
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
- if (isa<ImplicitValueInitExpr>(E)) {
+ if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+ // Storing "i32 0" to a zero'd memory location is a noop.
+ } else if (isa<ImplicitValueInitExpr>(E)) {
EmitNullInitializationToLValue(LV, T);
} else if (T->isReferenceType()) {
RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
@@ -500,13 +552,19 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
} else if (CGF.hasAggregateLLVMType(T)) {
- CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
+ false, Dest.isZeroed()));
} else {
- CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
}
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ // If the destination slot is already zeroed out before the aggregate is
+ // copied into it, we don't have to emit any zeros here.
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
+ return;
+
if (!CGF.hasAggregateLLVMType(T)) {
// For non-aggregates, we can store zero
llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
@@ -534,9 +592,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
#endif
- if (E->hadArrayRangeDesignator()) {
+ if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- }
+
+ llvm::Value *DestPtr = Dest.getAddr();
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
@@ -563,13 +622,27 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: were we intentionally ignoring address spaces and GC attributes?
for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ // If we're done emitting initializers and the destination is known-zeroed
+ // then we're done.
+ if (i == NumInitElements &&
+ Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(ElementType))
+ break;
+
llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
+
if (i < NumInitElements)
EmitInitializationToLValue(E->getInit(i), LV, ElementType);
-
else
EmitNullInitializationToLValue(LV, ElementType);
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(NextVal))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
}
return;
}
@@ -583,16 +656,6 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
unsigned NumInitElements = E->getNumInits();
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
- // If we're initializing the whole aggregate, just do it in place.
- // FIXME: This is a hack around an AST bug (PR6537).
- if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
- EmitInitializationToLValue(E->getInit(0),
- CGF.MakeAddrLValue(DestPtr, E->getType()),
- E->getType());
- return;
- }
-
-
if (E->getType()->isUnionType()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
@@ -612,13 +675,13 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: volatility
FieldDecl *Field = E->getInitializedFieldInUnion();
- LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
if (NumInitElements) {
// Store the initializer into the field
EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
} else {
- // Default-initialize to null
+ // Default-initialize to null.
EmitNullInitializationToLValue(FieldLoc, Field->getType());
}
@@ -638,10 +701,16 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (Field->isUnnamedBitfield())
continue;
+ // Don't emit GEP before a noop store of zero.
+ if (CurInitVal == NumInitElements && Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ break;
+
// FIXME: volatility
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
// We never generate write-barries for initialized fields.
FieldLoc.setNonGC(true);
+
if (CurInitVal < NumInitElements) {
// Store the initializer into the field.
EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
@@ -650,6 +719,14 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
      // We're out of initializers; default-initialize to null
EmitNullInitializationToLValue(FieldLoc, Field->getType());
}
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (FieldLoc.isSimple())
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
}
}
@@ -657,31 +734,126 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Entry Points into this File
//===----------------------------------------------------------------------===//
+/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
+/// non-zero bytes that will be stored when outputting the initializer for the
+/// specified initializer expression.
+static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return GetNumNonZeroBytesInInit(PE->getSubExpr(), CGF);
+
+ // 0 and 0.0 won't require any non-zero stores!
+ if (isSimpleZero(E, CGF)) return 0;
+
+  // If this is an initlist expr, sum up the sizes of the (present)
+ // elements. If this is something weird, assume the whole thing is non-zero.
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+ return CGF.getContext().getTypeSize(E->getType())/8;
+
+ // InitListExprs for structs have to be handled carefully. If there are
+ // reference members, we need to consider the size of the reference, not the
+ // referencee. InitListExprs for unions and arrays can't have references.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (!RT->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ uint64_t NumNonZeroBytes = 0;
+
+ unsigned ILEElement = 0;
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member or run out of
+ // InitListExpr elements.
+ if (Field->getType()->isIncompleteArrayType() ||
+ ILEElement == ILE->getNumInits())
+ break;
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ const Expr *E = ILE->getInit(ILEElement++);
+
+ // Reference values are always non-null and have the width of a pointer.
+ if (Field->getType()->isReferenceType())
+ NumNonZeroBytes += CGF.getContext().Target.getPointerWidth(0);
+ else
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+ }
+
+ return NumNonZeroBytes;
+ }
+ }
+
+
+ uint64_t NumNonZeroBytes = 0;
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+ return NumNonZeroBytes;
+}
+
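A worked example of the count, assuming a 32-bit int:

    struct W { int a; int b[100]; };
    W w = { 7 };
    // GetNumNonZeroBytesInInit for "{ 7 }":
    //   field a: literal 7      -> 4 non-zero bytes
    //   field b: no initializer -> 0 bytes (the loop stops once the
    //                              InitListExpr's elements run out)
    // Result: 4 of the 404 bytes are potentially non-zero.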
+/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
+/// zeros in it, emit a memset and avoid storing the individual zeros.
+///
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+ CodeGenFunction &CGF) {
+ // If the slot is already known to be zeroed, nothing to do. Don't mess with
+ // volatile stores.
+ if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
+
+ // If the type is 16-bytes or smaller, prefer individual stores over memset.
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ if (TypeInfo.first/8 <= 16)
+ return;
+
+  // Check to see if over 3/4 of the initializer bytes are known to be zero.  If so,
+ // we prefer to emit memset + individual stores for the rest.
+ uint64_t NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > TypeInfo.first/8)
+ return;
+
+ // Okay, it seems like a good idea to use an initial memset, emit the call.
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first/8);
+ unsigned Align = TypeInfo.second/8;
+
+ llvm::Value *Loc = Slot.getAddr();
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+
+ Loc = CGF.Builder.CreateBitCast(Loc, BP);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, Align, false);
+
+ // Tell the AggExprEmitter that the slot is known zero.
+ Slot.setZeroed();
+}
+
+
+
+
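Plugging the struct W example above into this heuristic: the type is 404 bytes (over the 16-byte cutoff), and 4 non-zero bytes * 4 = 16 <= 404, so a 404-byte memset is emitted first and only the single store of 7 follows. The same decision in isolation, as an illustrative sketch rather than the clang entry point:

    // Memset only pays off for types over 16 bytes where at least
    // roughly 3/4 of the bytes are known to be zero.
    static bool shouldMemSetFirst(unsigned long long typeBytes,
                                  unsigned long long nonZeroBytes) {
      if (typeBytes <= 16) return false;     // prefer individual stores
      return nonZeroBytes * 4 <= typeBytes;  // >= 3/4 zero bytes
    }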
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type. The result is computed into DestPtr. Note that if DestPtr is null,
/// the value of the aggregate expression is not needed. If VolatileDest is
/// true, DestPtr cannot be 0.
+///
+/// A slot whose lifetime-externally-managed flag is set is initializing an
+/// object whose lifetime is already being managed.
//
// FIXME: Take Qualifiers object.
-void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
- bool VolatileDest, bool IgnoreResult,
- bool IsInitializer,
- bool RequiresGCollection) {
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
+ bool IgnoreResult) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
- assert ((DestPtr != 0 || VolatileDest == false)
- && "volatile aggregate can't be 0");
+ assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
+ "slot has bits but no address");
- AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
- RequiresGCollection)
- .Visit(const_cast<Expr*>(E));
+ // Optimize the slot if possible.
+ CheckAggExprForMemSetUse(Slot, E, *this);
+
+ AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, Temp, LV.isVolatileQualified());
+ EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
return LV;
}
@@ -734,12 +906,12 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
const llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace());
+ llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
const llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace());
+ llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
@@ -766,11 +938,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
- Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
- IntPtrTy),
- DestPtr, SrcPtr,
- // TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
- Builder.getInt32(TypeInfo.second/8),
- Builder.getInt1(isVolatile));
+ Builder.CreateMemCpy(DestPtr, SrcPtr,
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ TypeInfo.second/8, isVolatile);
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 9a98281771fa..bba7864bff98 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -11,9 +11,11 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -51,9 +53,65 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
Callee, ReturnValue, Args, MD);
}
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
-static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
+ const Expr *Base,
+ const CXXMethodDecl *MD) {
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOptions().AppleKext)
+ return false;
+
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+  //   struct A { virtual void f(); };
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+  // Similarly, if the class itself is marked 'final', nothing can derive
+  // from it, and we can therefore devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// This is a record decl. We know the type and can devirtualize it.
@@ -74,11 +132,13 @@ static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
// Check if this is a call expr that returns a record type.
if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
return CE->getCallReturnType()->isRecordType();
-
+
// We can't devirtualize the call.
return false;
}
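The two new 'final' checks cover the method and its class respectively. An illustrative example, assuming a compiler that accepts the C++0x final keyword:

    struct Base { virtual void f(); };
    struct Leaf final : Base {
      void f();       // cannot be overridden in any further-derived class
    };

    void call(Leaf *l) {
      l->f();         // devirtualized: emitted as a direct call to Leaf::f
    }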
+// Note: This function also emits constructor calls to support an MSVC
+// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
ReturnValueSlot ReturnValue) {
if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
@@ -87,6 +147,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ && !isa<CallExpr>(ME->getBase())) {
+ QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ MD->getParent()->getLocation());
+ }
+ }
+
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
@@ -98,32 +168,47 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
llvm::Value *This;
if (ME->isArrow())
This = EmitScalarExpr(ME->getBase());
- else {
- LValue BaseLV = EmitLValue(ME->getBase());
- This = BaseLV.getAddress();
- }
+ else
+ This = EmitLValue(ME->getBase()).getAddress();
if (MD->isTrivial()) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
-
- assert(MD->isCopyAssignment() && "unknown trivial member function");
- // We don't like to generate the trivial copy assignment operator when
- // it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
- return RValue::get(This);
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
+ return RValue::get(0);
+
+ if (MD->isCopyAssignmentOperator()) {
+ // We don't like to generate the trivial copy assignment operator when
+ // it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
+ CE->arg_begin(), CE->arg_end());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
}
// Compute the function type we're calling.
- const CGFunctionInfo &FInfo =
- (isa<CXXDestructorDecl>(MD)
- ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete)
- : CGM.getTypes().getFunctionInfo(MD));
+ const CGFunctionInfo *FInfo = 0;
+ if (isa<CXXDestructorDecl>(MD))
+ FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ else if (isa<CXXConstructorDecl>(MD))
+ FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
+ else
+ FInfo = &CGM.getTypes().getFunctionInfo(MD);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
+ = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -131,20 +216,34 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
- bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
- && !canDevirtualizeMemberFunctionCalls(ME->getBase());
-
+  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
+ && !canDevirtualizeMemberFunctionCalls(getContext(),
+ ME->getBase(), MD);
llvm::Value *Callee;
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ if (getContext().getLangOptions().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
}
+ } else if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(MD)) {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
} else if (UseVirtualCall) {
- Callee = BuildVirtualCall(MD, This, Ty);
+ Callee = BuildVirtualCall(MD, This, Ty);
} else {
- Callee = CGM.GetAddrOfFunction(MD, Ty);
+ if (getContext().getLangOptions().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
}
return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
@@ -180,7 +279,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *Callee =
- CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(CGF, This, MemFnPtr, MPT);
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
CallArgList Args;
@@ -203,29 +302,14 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- if (MD->isCopyAssignment()) {
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This = LV.getAddress();
+
+ if (MD->isCopyAssignmentOperator()) {
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
if (ClassDecl->hasTrivialCopyAssignment()) {
assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
"EmitCXXOperatorMemberCallExpr - user declared copy assignment");
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
- EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
- if (LV.isPropertyRef())
- EmitObjCPropertySet(LV.getPropertyRefExpr(),
- RValue::getAggregate(AggLoc,
- false /*VolatileDest*/));
- else
- EmitObjCPropertySet(LV.getKVCRefExpr(),
- RValue::getAggregate(AggLoc,
- false /*VolatileDest*/));
- return RValue::getAggregate(0, false);
- }
- else
- This = LV.getAddress();
-
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
EmitAggregateCopy(This, Src, Ty);
@@ -237,21 +321,10 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getArg(0)->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
- This = RV.getAggregateAddr();
- }
- else
- This = LV.getAddress();
-
llvm::Value *Callee;
- if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
+ if (MD->isVirtual() &&
+ !canDevirtualizeMemberFunctionCalls(getContext(),
+ E->getArg(0), MD))
Callee = BuildVirtualCall(MD, This, Ty);
else
Callee = CGM.GetAddrOfFunction(MD, Ty);
@@ -261,28 +334,29 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
}
void
-CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
- const CXXConstructExpr *E) {
- assert(Dest && "Must have a destination!");
+CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+ AggValueSlot Dest) {
+ assert(!Dest.isIgnored() && "Must have a destination!");
const CXXConstructorDecl *CD = E->getConstructor();
// If we require zero initialization before (or instead of) calling the
// constructor, as can be the case with a non-user-provided default
// constructor, emit the zero initialization now.
if (E->requiresZeroInitialization())
- EmitNullInitialization(Dest, E->getType());
-
+ EmitNullInitialization(Dest.getAddr(), E->getType());
// If this is a call to a trivial default constructor, do nothing.
if (CD->isTrivial() && CD->isDefaultConstructor())
return;
- // Code gen optimization to eliminate copy constructor and return
- // its first argument instead, if in fact that argument is a temporary
- // object.
+ // Elide the constructor if we're constructing from a temporary.
+ // The temporary check is required because Sema sets this on NRVO
+ // returns.
if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
- if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
- EmitAggExpr(Arg, Dest, false);
+ assert(getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(0)->getType()));
+ if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
+ EmitAggExpr(E->getArg(0), Dest);
return;
}
}
@@ -294,7 +368,7 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest, BasePtr);
+ Builder.CreateBitCast(Dest.getAddr(), BasePtr);
EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
E->arg_begin(), E->arg_end());
@@ -307,11 +381,36 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
// Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
E->arg_begin(), E->arg_end());
}
}
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
+ llvm::Value *Src,
+ const Expr *Exp) {
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
+ Exp = E->getSubExpr();
+ assert(isa<CXXConstructExpr>(Exp) &&
+ "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
+ const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+ const CXXConstructorDecl *CD = E->getConstructor();
+ RunCleanupsScope Scope(*this);
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+  // FIXME: Do we still need this for copy-constructor synthesis?
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ assert(!getContext().getAsConstantArrayType(E->getType())
+ && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
+ E->arg_begin(), E->arg_end());
+}
+
/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
@@ -341,7 +440,7 @@ static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
return CharUnits::Zero();
- return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
@@ -387,7 +486,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
unsigned SizeWidth = NEC.getBitWidth();
// Determine if there is an overflow here by doing an extended multiply.
- NEC.zext(SizeWidth*2);
+ NEC = NEC.zext(SizeWidth*2);
llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
SC *= NEC;
@@ -396,8 +495,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
// overflow's already happened because SizeWithoutCookie isn't
// used if the allocator returns null or throws, as it should
// always do on an overflow.
- llvm::APInt SWC = SC;
- SWC.trunc(SizeWidth);
+ llvm::APInt SWC = SC.trunc(SizeWidth);
SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
// Add the cookie size.
@@ -405,7 +503,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
}
if (SC.countLeadingZeros() >= SizeWidth) {
- SC.trunc(SizeWidth);
+ SC = SC.trunc(SizeWidth);
Size = llvm::ConstantInt::get(SizeTy, SC);
} else {
// On overflow, produce a -1 so operator new throws.
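The widening trick above can be replayed with plain integers: multiply in twice the width and check whether the product still fits in the original width. A standalone sketch, assuming a compiler that provides unsigned __int128:

    // Returns the byte size for new T[n], or all-ones on overflow so that
    // operator new will fail, mirroring the APInt logic above.
    unsigned long long allocSize(unsigned long long n,
                                 unsigned long long elemSize) {
      unsigned __int128 wide = (unsigned __int128)n * elemSize;
      if (wide >> 64)          // non-zero high half means overflow
        return ~0ULL;
      return (unsigned long long)wide;
    }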
@@ -531,8 +629,11 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
- else
- CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+ else {
+ AggValueSlot Slot
+ = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
+ CGF.EmitAggExpr(Init, Slot);
+ }
}
void
@@ -591,18 +692,10 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
llvm::Value *NewPtr, llvm::Value *Size) {
- llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
- if (NewPtr->getType() != BP)
- NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
-
- CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
- llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
- Size,
- llvm::ConstantInt::get(CGF.Int32Ty,
- CGF.getContext().getTypeAlign(T)/8),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
- 0));
+  NewPtr = CGF.EmitCastToVoidPtr(NewPtr);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
+ CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
+ Alignment.getQuantity(), false);
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
@@ -669,6 +762,163 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression.
+ class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *Ptr;
+ llvm::Value *AllocSize;
+
+ RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(RValue);
+ }
+
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ llvm::Value *AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, RValue Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2)
+ DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I)
+ DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression when the new expression is
+ /// conditional.
+ class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ DominatingValue<RValue>::saved_type Ptr;
+ DominatingValue<RValue>::saved_type AllocSize;
+
+ DominatingValue<RValue>::saved_type *getPlacementArgs() {
+ return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ }
+
+ CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ DominatingValue<RValue>::saved_type Ptr,
+ DominatingValue<RValue>::saved_type AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2) {
+ RValue RV = AllocSize.restore(CGF);
+ DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ }
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+ RValue RV = getPlacementArgs()[I].restore(CGF);
+ DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ }
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+}
+
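Both cleanup classes store a variable number of arguments directly after the object itself (the reinterpret_cast<...>(this + 1) accessors), sized through getExtraSize so that pushCleanupWithExtra can make a single allocation on the EH stack. A minimal standalone sketch of that trailing-storage layout, illustrative only:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct VarArgsNode {
      unsigned NumArgs;
      int *args() { return reinterpret_cast<int *>(this + 1); }

      static std::size_t getExtraSize(unsigned N) { return N * sizeof(int); }

      static VarArgsNode *create(unsigned N) {
        // One allocation: the header followed immediately by N ints,
        // like a single EHScopeStack slot.
        void *Mem = std::malloc(sizeof(VarArgsNode) + getExtraSize(N));
        VarArgsNode *Node = new (Mem) VarArgsNode;
        Node->NumArgs = N;
        return Node;
      }
    };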
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *AllocSize,
+ const CallArgList &NewArgs) {
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!CGF.isInConditionalBranch()) {
+ CallDeleteDuringNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr, AllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I, NewArgs[I+1].first);
+
+ return;
+ }
+
+ // Otherwise, we need to save all this stuff.
+ DominatingValue<RValue>::saved_type SavedNewPtr =
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::saved_type SavedAllocSize =
+ DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+ CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I,
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
+
+ CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
+}
+
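The language rule motivating all of this: if initialization in a new-expression throws, the matching operator delete, including placement forms, is called automatically with the original arguments. A small runnable example of the behavior being implemented:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Pool {};
    void *operator new(std::size_t Sz, Pool &) { return std::malloc(Sz); }
    // Called automatically only when the matching placement new's
    // initialization throws -- exactly what EnterNewDeleteCleanup arranges.
    void operator delete(void *P, Pool &) {
      std::puts("placement operator delete");
      std::free(P);
    }

    struct Widget { Widget() { throw 0; } };

    int main() {
      Pool P;
      try { new (P) Widget; } catch (int) {}
      return 0;
    }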
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType AllocType = E->getAllocatedType();
if (AllocType->isArrayType())
@@ -757,13 +1007,22 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
CalculateCookiePadding(*this, E).isZero());
if (AllocSize != AllocSizeWithoutCookie) {
assert(E->isArray());
- NewPtr = CGM.getCXXABI().InitializeArrayCookie(CGF, NewPtr, NumElements,
- AllocType);
+ NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
+ E, AllocType);
+ }
+
+ // If there's an operator delete, enter a cleanup to call it if an
+ // exception is thrown.
+ EHScopeStack::stable_iterator CallOperatorDelete;
+ if (E->getOperatorDelete()) {
+ EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
+ CallOperatorDelete = EHStack.stable_begin();
}
const llvm::Type *ElementPtrTy
= ConvertTypeForMem(AllocType)->getPointerTo(AS);
NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+
if (E->isArray()) {
EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
@@ -776,6 +1035,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
} else {
EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
}
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (CallOperatorDelete.isValid())
+ DeactivateCleanupBlock(CallOperatorDelete);
if (NullCheckResult) {
Builder.CreateBr(NewEnd);
@@ -876,6 +1140,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
// Make sure that we call delete even if the dtor throws.
+  // This doesn't have to be a conditional cleanup because we're going
+ // to pop it off in a second.
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
@@ -950,18 +1216,19 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
- const FunctionDecl *OperatorDelete,
+ const CXXDeleteExpr *E,
llvm::Value *Ptr,
QualType ElementType) {
llvm::Value *NumElements = 0;
llvm::Value *AllocatedPtr = 0;
CharUnits CookieSize;
- CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
NumElements, AllocatedPtr, CookieSize);
assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
// Make sure that we call delete even if one of the dtors throws.
+ const FunctionDecl *OperatorDelete = E->getOperatorDelete();
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
AllocatedPtr, OperatorDelete,
NumElements, ElementType,
@@ -1031,7 +1298,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
cast<llvm::PointerType>(Ptr->getType())->getElementType());
if (E->isArrayForm()) {
- EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
}
@@ -1039,7 +1306,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
EmitBlock(DeleteEnd);
}
-llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
QualType Ty = E->getType();
const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
@@ -1059,8 +1326,6 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// FIXME: if subE is an lvalue do
LValue Obj = EmitLValue(subE);
llvm::Value *This = Obj.getAddress();
- LTy = LTy->getPointerTo()->getPointerTo();
- llvm::Value *V = Builder.CreateBitCast(This, LTy);
// We need to do a zero check for *p, unless it has NonNullAttr.
// FIXME: PointerType->hasAttr<NonNullAttr>()
bool CanBeZero = false;
@@ -1071,12 +1336,12 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
llvm::BasicBlock *NonZeroBlock = createBasicBlock();
llvm::BasicBlock *ZeroBlock = createBasicBlock();
- llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
- Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
+ llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
+ Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
NonZeroBlock, ZeroBlock);
EmitBlock(ZeroBlock);
/// Call __cxa_bad_typeid
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::FunctionType *FTy;
FTy = llvm::FunctionType::get(ResultType, false);
llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
@@ -1084,7 +1349,7 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
Builder.CreateUnreachable();
EmitBlock(NonZeroBlock);
}
- V = Builder.CreateLoad(V, "vtable");
+ llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
V = Builder.CreateLoad(V);
return V;
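In the Itanium C++ ABI the pointer to the std::type_info object sits one slot before the vtable's address point, which is what the GEP of -1 above loads. The same read done by hand, ABI-specific and undefined behavior as portable C++, shown only to illustrate the layout:

    #include <typeinfo>

    struct Poly { virtual ~Poly() {} };

    const std::type_info *typeidByHand(Poly *P) {
      // The vptr is the hidden first field of a polymorphic object.
      void **VTable = *reinterpret_cast<void ***>(P);
      // Itanium ABI: slot -1 holds the type_info pointer.
      return static_cast<const std::type_info *>(VTable[-1]);
    }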
@@ -1141,23 +1406,20 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
// See if this is a dynamic_cast(void*)
if (ToVoid) {
llvm::Value *This = V;
- V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
- V = Builder.CreateLoad(V, "vtable");
+ V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
V = Builder.CreateLoad(V, "offset to top");
- This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
+ This = EmitCastToVoidPtr(This);
V = Builder.CreateInBoundsGEP(This, V);
V = Builder.CreateBitCast(V, LTy);
} else {
/// Call __dynamic_cast
- const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *ResultType = Int8PtrTy;
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *PtrToInt8Ty
- = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(PtrDiffTy);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
@@ -1172,7 +1434,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
llvm::Value *DestArg
= CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
- V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
V, SrcArg, DestArg, hint);
V = Builder.CreateBitCast(V, LTy);
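The dynamic_cast-to-void* fast path reads the 'offset to top' entry, stored two slots before the vtable's address point, and adds it to the object pointer to recover the most-derived object. By hand, again ABI-specific and illustrative only:

    #include <cstddef>

    struct B1 { virtual ~B1() {} };
    struct B2 { virtual ~B2() {} };
    struct D : B1, B2 {};

    void *castToVoid(B2 *P) {
      // Itanium ABI: the entry two slots before the address point is a
      // ptrdiff_t displacement from this subobject to the complete object.
      std::ptrdiff_t *VTable = *reinterpret_cast<std::ptrdiff_t **>(P);
      return reinterpret_cast<char *>(P) + VTable[-2];
      // Equivalent to dynamic_cast<void*>(P) for a polymorphic type.
    }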
@@ -1182,7 +1444,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
EmitBlock(BadCastBlock);
/// Invoke __cxa_bad_cast
- ResultType = llvm::Type::getVoidTy(VMContext);
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::FunctionType *FBadTy;
FBadTy = llvm::FunctionType::get(ResultType, false);
llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 79e9dd42ee28..7b0292b8f225 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -35,14 +35,9 @@ class ComplexExprEmitter
  // True if we should ignore the real/imaginary component of the result.
bool IgnoreReal;
bool IgnoreImag;
- // True if we should ignore the value of a=b
- bool IgnoreRealAssign;
- bool IgnoreImagAssign;
public:
- ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
- bool irn=false, bool iin=false)
- : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
- IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
}
@@ -60,36 +55,36 @@ public:
IgnoreImag = false;
return I;
}
- bool TestAndClearIgnoreRealAssign() {
- bool I = IgnoreRealAssign;
- IgnoreRealAssign = false;
- return I;
- }
- bool TestAndClearIgnoreImagAssign() {
- bool I = IgnoreImagAssign;
- IgnoreImagAssign = false;
- return I;
- }
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
ComplexPairTy EmitLoadOfLValue(const Expr *E) {
- LValue LV = CGF.EmitLValue(E);
+ return EmitLoadOfLValue(CGF.EmitLValue(E));
+ }
+
+ ComplexPairTy EmitLoadOfLValue(LValue LV) {
if (LV.isSimple())
return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
- if (LV.isPropertyRef())
- return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
-
- assert(LV.isKVCRef() && "Unknown LValue type!");
- return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
}
/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
/// the real and imaginary pieces.
ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+ /// EmitStoreThroughLValue - Given an l-value of complex type, store
+ /// a complex number into it.
+ void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
+ if (LV.isSimple())
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
+
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ CGF.EmitStoreThroughPropertyRefLValue(RValue::getComplex(Val), LV);
+ }
+
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
@@ -102,6 +97,10 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
+ ComplexPairTy Visit(Expr *E) {
+ return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
+ }
+
ComplexPairTy VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
assert(0 && "Stmt can't have complex result type!");
@@ -117,10 +116,7 @@ public:
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- return EmitLoadOfLValue(E);
- }
- ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
+ assert(E->getObjectKind() == OK_Ordinary);
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -128,6 +124,11 @@ public:
}
ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ }
// FIXME: CompoundLiteralExpr
@@ -165,8 +166,6 @@ public:
ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
return Visit(E->getSubExpr());
}
ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
@@ -178,8 +177,8 @@ public:
ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
- ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
+ return CGF.EmitExprWithCleanups(E).getComplexVal();
}
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
@@ -202,6 +201,10 @@ public:
};
BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ ComplexPairTy &Val);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &));
@@ -211,15 +214,15 @@ public:
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
- ComplexPairTy VisitBinMul(const BinaryOperator *E) {
- return EmitBinMul(EmitBinOps(E));
- }
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
ComplexPairTy VisitBinSub(const BinaryOperator *E) {
return EmitBinSub(EmitBinOps(E));
}
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
return EmitBinDiv(EmitBinOps(E));
}
@@ -242,11 +245,15 @@ public:
// Logical and/or always return int, never complex.
// No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
ComplexPairTy VisitBinAssign (const BinaryOperator *E);
ComplexPairTy VisitBinComma (const BinaryOperator *E);
- ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
ComplexPairTy VisitInitListExpr(InitListExpr *E);
@@ -265,13 +272,13 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
bool isVolatile) {
llvm::Value *Real=0, *Imag=0;
- if (!IgnoreReal) {
+ if (!IgnoreReal || isVolatile) {
llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
SrcPtr->getName() + ".realp");
Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
}
- if (!IgnoreImag) {
+ if (!IgnoreImag || isVolatile) {
llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
SrcPtr->getName() + ".imagp");
Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
@@ -307,8 +314,7 @@ ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
ComplexPairTy ComplexExprEmitter::
VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
- return
- ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
}
@@ -320,6 +326,7 @@ ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
}
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
}
@@ -341,6 +348,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
QualType DestTy) {
+ switch (CK) {
+ case CK_GetObjCProperty: {
+ LValue LV = CGF.EmitLValue(Op);
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
+ }
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ return Visit(Op);
+
+ // TODO: do all of these
+ default:
+ break;
+ }
+
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
@@ -372,8 +395,6 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResR, *ResI;
@@ -390,8 +411,6 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
// ~(a+ib) = a + i*-b
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResI;
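
The identity in the comment above is the GNU complex-conjugation operator; a small sketch, assuming a compiler with GNU extensions (clang++ or g++), where 4.0i is the GNU imaginary-constant extension:

    #include <cstdio>

    int main() {
      __complex__ double z = 3.0 + 4.0i; // GNU imaginary constant
      __complex__ double c = ~z;         // GNU ~: complex conjugate, a - ib
      std::printf("%g %g\n", __real__ c, __imag__ c); // prints 3 -4
      return 0;
    }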
@@ -505,8 +524,6 @@ ComplexExprEmitter::BinOpInfo
ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
BinOpInfo Ops;
Ops.LHS = Visit(E->getLHS());
Ops.RHS = Visit(E->getRHS());
@@ -515,37 +532,31 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
}
-// Compound assignments.
-ComplexPairTy ComplexExprEmitter::
-EmitCompoundAssign(const CompoundAssignOperator *E,
- ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ ComplexPairTy &Val) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
- QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+ QualType LHSTy = E->getLHS()->getType();
BinOpInfo OpInfo;
// Load the RHS and LHS operands.
// __block variables need to have the rhs evaluated first, plus this should
- // improve codegen a little. It is possible for the RHS to be complex or
- // scalar.
+ // improve codegen a little.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(CK_Unknown, E->getRHS(), OpInfo.Ty);
+
+ // The RHS should have been converted to the computation type.
+ assert(OpInfo.Ty->isAnyComplexType());
+ assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
+ E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
LValue LHS = CGF.EmitLValue(E->getLHS());
- // We know the LHS is a complex lvalue.
- ComplexPairTy LHSComplexPair;
- if (LHS.isPropertyRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal();
- else if (LHS.isKVCRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal();
- else
- LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(),
- LHS.isVolatileQualified());
+
+ // Load from the l-value.
+ ComplexPairTy LHSComplexPair = EmitLoadOfLValue(LHS);
OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
@@ -554,108 +565,107 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// Truncate the result back to the LHS type.
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ Val = Result;
// Store the result value into the LHS lvalue.
- if (LHS.isPropertyRef())
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
- RValue::getComplex(Result));
- else if (LHS.isKVCRef())
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result));
- else
- EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
-
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
-
+ EmitStoreThroughLValue(Result, LHS);
+
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ ComplexPairTy Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
- return Result;
+ if (LV.isPropertyRef())
+ return Val;
- // Otherwise, reload the value.
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
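
The C/C++ split above mirrors the language rules: in C the result of an assignment is the stored r-value, while in C++ it is the l-value itself, which is why a volatile LHS must be re-read. A small sketch of the C++ side:

    #include <iostream>

    int main() {
      double a = 0.0;
      // In C++ an assignment yields an l-value, so it can itself be assigned to.
      (a = 1.0) = 2.0;
      std::cout << a << '\n'; // prints 2
      return 0;
    }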
-ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
- TestAndClearIgnoreReal();
- TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType()) &&
"Invalid assignment");
- // Emit the RHS.
- ComplexPairTy Val = Visit(E->getRHS());
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
+ // Emit the RHS. __block variables need the RHS evaluated first.
+ Val = Visit(E->getRHS());
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
// Store the result value into the LHS lvalue.
- if (LHS.isPropertyRef())
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
- else if (LHS.isKVCRef())
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
- else
- EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+ EmitStoreThroughLValue(Val, LHS);
+
+ return LHS;
+}
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LV.isPropertyRef())
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
return Val;
- // Otherwise, reload the value.
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
- CGF.EnsureInsertPoint();
+ CGF.EmitIgnoredExpr(E->getLHS());
return Visit(E->getRHS());
}
ComplexPairTy ComplexExprEmitter::
-VisitConditionalOperator(const ConditionalOperator *E) {
- if (!E->getLHS()) {
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- const llvm::Type *EltTy =
- CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
- llvm::Value *U = llvm::UndefValue::get(EltTy);
- return ComplexPairTy(U, U);
- }
-
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
-
- // Handle the GNU extension for missing LHS.
- assert(E->getLHS() && "Must have LHS for complex value");
-
- ComplexPairTy LHS = Visit(E->getLHS());
+ ComplexPairTy LHS = Visit(E->getTrueExpr());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
+ eval.end(CGF);
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
-
- ComplexPairTy RHS = Visit(E->getRHS());
+ ComplexPairTy RHS = Visit(E->getFalseExpr());
RHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
-
CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
// Create a PHI node for the real part.
llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
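
The OpaqueValueMapping bound above exists for the GNU binary conditional, where the condition doubles as the true branch and must be evaluated exactly once. A sketch, assuming GNU extensions:

    #include <cstdio>

    static int calls = 0;
    static int f() { ++calls; return 0; }

    int main() {
      int r = f() ?: 42; // GNU ?: evaluates f() once, reusing it as the condition
      std::printf("r=%d calls=%d\n", r, calls); // prints r=42 calls=1
      return 0;
    }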
@@ -716,12 +726,11 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, ignoring the result.
ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
- bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ bool IgnoreImag) {
assert(E && E->getType()->isAnyComplexType() &&
"Invalid complex expression to emit");
- return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
- IgnoreImagAssign)
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
.Visit(const_cast<Expr*>(E));
}
@@ -749,3 +758,27 @@ ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
bool SrcIsVolatile) {
return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
}
+
+LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+ ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
+ switch (E->getOpcode()) {
+ case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
+ case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
+ case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
+ case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break;
+
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ Op = 0;
+ }
+
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+}
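
EmitCompoundAssign and the switch above rely on the pointer-to-member-function idiom: one driver routine parameterized over which arithmetic emitter to call. A stand-alone sketch of that dispatch pattern (all names here are invented):

    #include <cstdio>

    struct Emitter {
      int EmitAdd(int a, int b) { return a + b; }
      int EmitMul(int a, int b) { return a * b; }
      // One driver, parameterized over which emitter member to invoke.
      int Drive(int a, int b, int (Emitter::*Func)(int, int)) {
        return (this->*Func)(a, b);
      }
    };

    int main() {
      Emitter e;
      std::printf("%d %d\n", e.Drive(2, 3, &Emitter::EmitAdd),
                             e.Drive(2, 3, &Emitter::EmitMul)); // prints 5 6
      return 0;
    }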
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 9c31c2a3d538..40d7b6c32e4b 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -142,11 +142,11 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
if (FieldSize > FieldValue.getBitWidth())
- FieldValue.zext(FieldSize);
+ FieldValue = FieldValue.zext(FieldSize);
// Truncate the size of FieldValue to the bit field size.
if (FieldSize < FieldValue.getBitWidth())
- FieldValue.trunc(FieldSize);
+ FieldValue = FieldValue.trunc(FieldSize);
if (FieldOffset < NextFieldOffsetInBytes * 8) {
// Either part of the field or the entire field can go into the previous
@@ -166,20 +166,20 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining high bits.
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
} else {
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining low bits.
FieldValue = FieldValue.lshr(BitsInPreviousByte);
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
}
}
- Tmp.zext(8);
+ Tmp = Tmp.zext(8);
if (CGM.getTargetData().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
@@ -231,13 +231,10 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
// We want the high bits.
- Tmp = FieldValue;
- Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
- Tmp.trunc(8);
+ Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
} else {
// We want the low bits.
- Tmp = FieldValue;
- Tmp.trunc(8);
+ Tmp = FieldValue.trunc(8);
FieldValue = FieldValue.lshr(8);
}
@@ -245,7 +242,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
NextFieldOffsetInBytes++;
- FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
assert(FieldValue.getBitWidth() > 0 &&
@@ -257,10 +254,9 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
- FieldValue.zext(8);
- FieldValue = FieldValue << (8 - BitWidth);
+ FieldValue = FieldValue.zext(8) << (8 - BitWidth);
} else
- FieldValue.zext(8);
+ FieldValue = FieldValue.zext(8);
}
// Append the last element.
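
The mechanical rewrites in these hunks track LLVM's change of APInt::zext/trunc from in-place mutators to const members that return the resized value, so every call site must now assign the result back. A sketch of the new style, assuming LLVM headers and libraries are available:

    #include <llvm/ADT/APInt.h>
    #include <cassert>

    int main() {
      llvm::APInt Field(8, 200);
      Field = Field.zext(32);  // returns the widened value; must be assigned
      Field = Field.trunc(16); // likewise for truncation
      assert(Field.getBitWidth() == 16 && Field == 200);
      return 0;
    }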
@@ -372,7 +368,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
}
- uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+ uint64_t LayoutSizeInBytes = Layout.getSize().getQuantity();
if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
// If the struct is bigger than the size of the record type,
@@ -398,9 +394,9 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
// Append tail padding if necessary.
- AppendTailPadding(Layout.getSize());
+ AppendTailPadding(CGM.getContext().toBits(Layout.getSize()));
- assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ assert(Layout.getSize().getQuantity() == NextFieldOffsetInBytes &&
"Tail padding mismatch!");
return true;
@@ -454,17 +450,10 @@ public:
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
-
+
llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
- if (const MemberPointerType *MPT =
- E->getType()->getAs<MemberPointerType>()) {
- DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
- NamedDecl *ND = DRE->getDecl();
- if (MPT->isMemberFunctionPointer())
- return CGM.getCXXABI().EmitMemberPointer(cast<CXXMethodDecl>(ND));
- else
- return CGM.getCXXABI().EmitMemberPointer(cast<FieldDecl>(ND));
- }
+ if (E->getType()->isMemberPointerType())
+ return CGM.getMemberPointerConstant(E);
return 0;
}
@@ -755,7 +744,7 @@ public:
if (!VD->hasLocalStorage()) {
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
- else if (VD->isBlockVarDecl()) {
+ else if (VD->isLocalVarDecl()) {
assert(CGF && "Can't access static local vars without CGF");
return CGF->GetAddrOfStaticLocalVar(VD);
}
@@ -925,7 +914,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
else
Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
}
- return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ return llvm::ConstantVector::get(Inits);
}
}
}
@@ -938,6 +927,38 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return C;
}
+static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
+ const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
+ return layout.getFieldOffset(field->getFieldIndex());
+}
+
+llvm::Constant *
+CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
+ // Member pointer constants always have a very particular form.
+ const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
+ const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
+
+ // A member function pointer.
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
+ return getCXXABI().EmitMemberPointer(method);
+
+ // Otherwise, a member data pointer.
+ uint64_t fieldOffset;
+ if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
+ fieldOffset = getFieldOffset(getContext(), field);
+ else {
+ const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);
+
+ fieldOffset = 0;
+ for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
+ ce = ifield->chain_end(); ci != ce; ++ci)
+ fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
+ }
+
+ CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ return getCXXABI().EmitMemberDataPointer(type, chars);
+}
+
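What getMemberPointerConstant computes for a data member matches the Itanium ABI representation: the pointer-to-member is simply the member's byte offset. A C++11 sketch that exposes that representation on an Itanium-ABI target:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct S { int a; int b; };

    int main() {
      int S::*pm = &S::b;
      std::ptrdiff_t raw;
      static_assert(sizeof raw == sizeof pm, "Itanium: member pointer is a ptrdiff_t");
      std::memcpy(&raw, &pm, sizeof raw);
      std::printf("%td\n", raw); // typically prints 4: the byte offset of S::b
      return 0;
    }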
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
std::vector<llvm::Constant *> &Elements,
@@ -964,8 +985,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
if (I->isVirtual()) {
- // FIXME: We should initialize null pointer to data members in virtual
- // bases here.
+ // Ignore virtual bases.
continue;
}
@@ -980,7 +1000,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
if (CGM.getTypes().isZeroInitializable(BaseDecl))
continue;
- uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
FillInNullDataMemberPointers(CGM, I->getType(),
Elements, StartOffset + BaseOffset);
}
@@ -1005,9 +1025,10 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
uint64_t StartIndex = StartOffset / 8;
uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+ // FIXME: hardcodes Itanium member pointer representation!
llvm::Constant *NegativeOne =
llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
- -1ULL, /*isSigned=*/true);
+ -1ULL, /*isSigned*/true);
// Fill in the null data member pointer.
for (uint64_t I = StartIndex; I != EndIndex; ++I)
@@ -1015,6 +1036,124 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
}
}
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ const llvm::Type *baseType,
+ const CXXRecordDecl *base);
+
+static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
+ const CXXRecordDecl *record,
+ bool asCompleteObject) {
+ const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
+ const llvm::StructType *structure =
+ (asCompleteObject ? layout.getLLVMType()
+ : layout.getBaseSubobjectLLVMType());
+
+ unsigned numElements = structure->getNumElements();
+ std::vector<llvm::Constant *> elements(numElements);
+
+ // Fill in all the bases.
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
+
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+
+ // Fill in all the fields.
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+
+ // Ignore bit fields.
+ if (field->isBitField())
+ continue;
+
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // Fill in the virtual bases, if we're working with the complete object.
+ if (asCompleteObject) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getVirtualBaseIndex(base);
+
+ // We might have already laid this field out.
+ if (elements[fieldIndex]) continue;
+
+ const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != numElements; ++i) {
+ if (!elements[i])
+ elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(structure, elements);
+}
+
+/// Emit the null constant for a base subobject.
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ const llvm::Type *baseType,
+ const CXXRecordDecl *base) {
+ const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
+
+ // Just zero out bases that don't have any pointer to data members.
+ if (baseLayout.isZeroInitializableAsBase())
+ return llvm::Constant::getNullValue(baseType);
+
+ // If the base type is a struct, we can just use its null constant.
+ if (isa<llvm::StructType>(baseType)) {
+ return EmitNullConstant(CGM, base, /*complete*/ false);
+ }
+
+ // Otherwise, some bases are represented as arrays of i8 if the size
+ // of the base is smaller than its corresponding LLVM type. Figure
+ // out how many elements this base array has.
+ const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ unsigned numBaseElements = baseArrayType->getNumElements();
+
+ // Fill in null data member pointers.
+ std::vector<llvm::Constant *> baseElements(numBaseElements);
+ FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
+ baseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (numBaseElements) {
+ const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
+ for (unsigned i = 0; i != numBaseElements; ++i) {
+ if (!baseElements[i])
+ baseElements[i] = i8_zero;
+ }
+ }
+
+ return llvm::ConstantArray::get(baseArrayType, baseElements);
+}
+
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
if (getTypes().isZeroInitializable(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
@@ -1036,79 +1175,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- const llvm::StructType *STy =
- cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
- unsigned NumElements = STy->getNumElements();
- std::vector<llvm::Constant *> Elements(NumElements);
-
- const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
-
- // Go through all bases and fill in any null pointer to data members.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual()) {
- // FIXME: We should initialize null pointer to data members in virtual
- // bases here.
- continue;
- }
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore empty bases.
- if (BaseDecl->isEmpty())
- continue;
-
- // Ignore bases that don't have any pointer to data members.
- if (getTypes().isZeroInitializable(BaseDecl))
- continue;
-
- // Currently, all bases are arrays of i8. Figure out how many elements
- // this base array has.
- unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
- const llvm::ArrayType *BaseArrayTy =
- cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
-
- unsigned NumBaseElements = BaseArrayTy->getNumElements();
- std::vector<llvm::Constant *> BaseElements(NumBaseElements);
-
- // Now fill in null data member pointers.
- FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
-
- // Now go through all other elements and zero them out.
- if (NumBaseElements) {
- llvm::Constant *Zero =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
-
- for (unsigned I = 0; I != NumBaseElements; ++I) {
- if (!BaseElements[I])
- BaseElements[I] = Zero;
- }
- }
-
- Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
- BaseElements);
- }
-
- for (RecordDecl::field_iterator I = RD->field_begin(),
- E = RD->field_end(); I != E; ++I) {
- const FieldDecl *FD = *I;
-
- // Ignore bit fields.
- if (FD->isBitField())
- continue;
-
- unsigned FieldNo = Layout.getLLVMFieldNo(FD);
- Elements[FieldNo] = EmitNullConstant(FD->getType());
- }
-
- // Now go through all other fields and zero them out.
- for (unsigned i = 0; i != NumElements; ++i) {
- if (!Elements[i])
- Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
- }
-
- return llvm::ConstantStruct::get(STy, Elements);
+ return ::EmitNullConstant(*this, RD, /*complete object*/ true);
}
assert(T->isMemberPointerType() && "Should only see member pointers here!");
@@ -1117,6 +1184,5 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
- return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
- /*isSigned=*/true);
+ return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
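
The rule cited above is why records containing pointers to data members are not zero-initializable: the Itanium null member pointer is all-ones, not all-zeros. A C++11 sketch on an Itanium-ABI target:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct S { int a; };

    int main() {
      int S::*pm = 0;            // the null member data pointer
      std::ptrdiff_t raw;
      static_assert(sizeof raw == sizeof pm, "Itanium layout assumed");
      std::memcpy(&raw, &pm, sizeof raw);
      std::printf("%td\n", raw); // prints -1 on Itanium-ABI targets
      return 0;
    }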
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 2318cc4e9aeb..3e1debd820b4 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -11,10 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
@@ -37,6 +39,7 @@ using llvm::Value;
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//
+namespace {
struct BinOpInfo {
Value *LHS;
Value *RHS;
@@ -45,7 +48,13 @@ struct BinOpInfo {
const Expr *E; // Entire expr, for error unsupported. May not be binop.
};
-namespace {
+static bool MustVisitNullValue(const Expr *E) {
+ // If a null pointer expression's type is the C++0x nullptr_t, then
+ // it's not necessarily a simple constant and it must be evaluated
+ // for its potential side effects.
+ return E->getType()->isNullPtrType();
+}
+
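MustVisitNullValue captures the case this C++0x sketch shows: an expression of type nullptr_t has a statically known value but may still have side effects, so it cannot simply be folded away:

    #include <cstddef>
    #include <cstdio>

    static std::nullptr_t noisy() { std::puts("evaluated"); return nullptr; }

    int main() {
      int *p = noisy(); // the null-to-pointer conversion still runs noisy()
      return p == 0 ? 0 : 1;
    }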
class ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
@@ -101,10 +110,49 @@ public:
/// EmitNullValue - Emit a value that corresponds to null for the given type.
Value *EmitNullValue(QualType Ty);
+ /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
+ Value *EmitFloatToBoolConversion(Value *V) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
+ return Builder.CreateFCmpUNE(V, Zero, "tobool");
+ }
+
+ /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
+ Value *EmitPointerToBoolConversion(Value *V) {
+ Value *Zero = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(V->getType()));
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
+ Value *EmitIntToBoolConversion(Value *V) {
+ // Because of the type rules of C, we often end up computing a
+ // logical value, then zero extending it to int, then wanting it
+ // as a logical value again. Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
+ if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ const llvm::IntegerType *Ty = cast<llvm::IntegerType>(V->getType());
+ Value *Zero = llvm::ConstantInt::get(Ty, 0);
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
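The zext peephole in EmitIntToBoolConversion targets the bool-to-int-to-bool round trip that C's rules create; in source form the pattern looks like:

    #include <cstdio>

    int main() {
      int flag = (2 > 1); // comparison produces an i1, zero-extended to int
      if (flag)           // ...then immediately tested as a logical value again
        std::puts("taken");
      return 0;
    }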
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
+ Value *Visit(Expr *E) {
+ return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
+ }
+
Value *VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
assert(0 && "Stmt can't have complex result type!");
@@ -112,7 +160,9 @@ public:
}
Value *VisitExpr(Expr *S);
- Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+ Value *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
@@ -133,11 +183,6 @@ public:
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
return EmitNullValue(E->getType());
}
- Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
- return llvm::ConstantInt::get(ConvertType(E->getType()),
- CGF.getContext().typesAreCompatible(
- E->getArgType1(), E->getArgType2()));
- }
Value *VisitOffsetOfExpr(OffsetOfExpr *E);
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
@@ -145,17 +190,45 @@ public:
return Builder.CreateBitCast(V, ConvertType(E->getType()));
}
+ Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ E->getPackLength());
+ }
+
+ Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getType());
+
+ // Otherwise, assume the mapping is the scalar directly.
+ return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ }
+
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
Expr::EvalResult Result;
- if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
- assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
- llvm::ConstantInt *CI
- = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
- CGF.EmitDeclRefExprDbgValue(E, CI);
- return CI;
+ if (!E->Evaluate(Result, CGF.getContext()))
+ return EmitLoadOfLValue(E);
+
+ assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
+
+ llvm::Constant *C;
+ if (Result.Val.isInt()) {
+ C = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ } else if (Result.Val.isFloat()) {
+ C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+ } else {
+ return EmitLoadOfLValue(E);
}
- return EmitLoadOfLValue(E);
+
+ // Make sure we emit a debug reference to the global variable.
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (!CGF.getContext().DeclMustBeEmitted(VD))
+ CGF.EmitDeclRefExprDbgValue(E, C);
+ } else if (isa<EnumConstantDecl>(E->getDecl())) {
+ CGF.EmitDeclRefExprDbgValue(E, C);
+ }
+
+ return C;
}
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
return CGF.EmitObjCSelectorExpr(E);
@@ -167,10 +240,8 @@ public:
return EmitLoadOfLValue(E);
}
Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- return EmitLoadOfLValue(E);
- }
- Value *VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
+ assert(E->getObjectKind() == OK_Ordinary &&
+ "reached property reference without lvalue-to-rvalue");
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -234,17 +305,26 @@ public:
return EmitScalarPrePostIncDec(E, LV, true, true);
}
+ llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal,
+ bool IsInc);
+
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
- // If the sub-expression is an instance member reference,
- // EmitDeclRefLValue will magically emit it with the appropriate
- // value as the "address".
+ if (isa<MemberPointerType>(E->getType())) // never sugared
+ return CGF.CGM.getMemberPointerConstant(E);
+
return EmitLValue(E->getSubExpr()).getAddress();
}
- Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryDeref(const UnaryOperator *E) {
+ if (E->getType()->isVoidType())
+ return Visit(E->getSubExpr()); // the actual value should be unused
+ return EmitLoadOfLValue(E);
+ }
Value *VisitUnaryPlus(const UnaryOperator *E) {
// This differs from gcc, though, most likely due to a bug in gcc.
TestAndClearIgnoreResultAssign();
@@ -267,8 +347,8 @@ public:
return CGF.LoadCXXThis();
}
- Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ Value *VisitExprWithCleanups(ExprWithCleanups *E) {
+ return CGF.EmitExprWithCleanups(E).getScalarVal();
}
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
@@ -278,8 +358,11 @@ public:
return 0;
}
Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
- return llvm::ConstantInt::get(Builder.getInt1Ty(),
- E->EvaluateTrait(CGF.getContext()));
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
+ Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
@@ -301,6 +384,10 @@ public:
return 0;
}
+ Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->hasSignedIntegerRepresentation()) {
@@ -318,9 +405,23 @@ public:
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
+ bool isTrapvOverflowBehavior() {
+ return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
+ == LangOptions::SOB_Trapping;
+ }
/// Create a binary op that checks for overflow.
/// Currently only supports +, - and *.
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ // Emit the overflow BB when the -ftrapv option is in effect.
+ void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
+ Builder.SetInsertPoint(overflowBB);
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+ }
+ // Check for undefined division and modulus behaviors.
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv);
Value *EmitDiv(const BinOpInfo &Ops);
Value *EmitRem(const BinOpInfo &Ops);
Value *EmitAdd(const BinOpInfo &Ops);
@@ -391,7 +492,7 @@ public:
// Other Operators.
Value *VisitBlockExpr(const BlockExpr *BE);
- Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
Value *VisitChooseExpr(ChooseExpr *CE);
Value *VisitVAArgExpr(VAArgExpr *VE);
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
@@ -409,11 +510,8 @@ public:
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
- if (SrcType->isRealFloatingType()) {
- // Compare against 0.0 for fp scalars.
- llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
- return Builder.CreateFCmpUNE(Src, Zero, "tobool");
- }
+ if (SrcType->isRealFloatingType())
+ return EmitFloatToBoolConversion(Src);
if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
@@ -421,25 +519,11 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
"Unknown scalar type to convert");
- // Because of the type rules of C, we often end up computing a logical value,
- // then zero extending it to int, then wanting it as a logical value again.
- // Optimize this common case.
- if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
- if (ZI->getOperand(0)->getType() ==
- llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
- Value *Result = ZI->getOperand(0);
- // If there aren't any more uses, zap the instruction to save space.
- // Note that there can be more uses, for example if this
- // is the result of an assignment.
- if (ZI->use_empty())
- ZI->eraseFromParent();
- return Result;
- }
- }
+ if (isa<llvm::IntegerType>(Src->getType()))
+ return EmitIntToBoolConversion(Src);
- // Compare against an integer or pointer null.
- llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
- return Builder.CreateICmpNE(Src, Zero, "tobool");
+ assert(isa<llvm::PointerType>(Src->getType()));
+ return EmitPointerToBoolConversion(Src);
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
@@ -501,10 +585,10 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
- for (unsigned i = 0; i < NumElements; i++)
+ for (unsigned i = 0; i != NumElements; ++i)
Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
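
CK_VectorSplat broadcasts one scalar into every lane; with clang's vector extension the same splat can be written directly in source, and it lowers to the all-zero-mask shufflevector built above:

    #include <cstdio>

    typedef int v4si __attribute__((vector_size(16)));

    static v4si splat(int x) { return (v4si){x, x, x, x}; } // one lane, broadcast

    int main() {
      v4si v = splat(7);
      std::printf("%d %d %d %d\n", v[0], v[1], v[2], v[3]); // prints 7 7 7 7
      return 0;
    }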
@@ -603,7 +687,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
}
- Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
+ Value* CV = llvm::ConstantVector::get(concat);
LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
LHSElts *= 2;
} else {
@@ -629,7 +713,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
MaskV.push_back(EltMask);
- Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
+ Value* MaskBits = llvm::ConstantVector::get(MaskV);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
// newv = undef
@@ -681,7 +765,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
indices.push_back(C);
}
- Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ Value *SV = llvm::ConstantVector::get(indices);
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
@@ -693,6 +777,17 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
EmitLValue(E->getBase());
return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
}
+
+ // Emit debug info for aggregate now, if it was delayed to reduce
+ // debug info size.
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
+ if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ M->getParent()->getLocation());
+ }
return EmitLoadOfLValue(E);
}
@@ -790,7 +885,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
VIsUndefShuffle = false;
}
if (!Args.empty()) {
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
V = Builder.CreateShuffleVector(LHS, RHS, Mask);
++CurIdx;
continue;
@@ -845,7 +940,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = InitElts; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
@@ -862,7 +957,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// merging subsequent shuffles into this one.
if (CurIdx == 0)
std::swap(V, Init);
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
VIsUndefShuffle = isa<llvm::UndefValue>(Init);
CurIdx += InitElts;
@@ -916,11 +1011,8 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// a default case, so the compiler will warn on a missing case. The cases
// are in the same order as in the CastKind enum.
switch (Kind) {
- case CK_Unknown:
- // FIXME: All casts should have a known kind!
- //assert(0 && "Unknown cast kind!");
- break;
-
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
@@ -963,9 +1055,6 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
return CGF.EmitDynamicCast(V, DCE);
}
- case CK_ToUnion:
- assert(0 && "Should be unreachable!");
- break;
case CK_ArrayToPointerDecay: {
assert(E->getType()->isArrayType() &&
@@ -988,10 +1077,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
case CK_FunctionToPointerDecay:
return EmitLValue(E).getAddress();
+ case CK_NullToPointer:
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ return llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(ConvertType(DestTy)));
+
case CK_NullToMemberPointer: {
- // If the subexpression's type is the C++0x nullptr_t, emit the
- // subexpression, which may have side effects.
- if (E->getType()->isNullPtrType())
+ if (MustVisitNullValue(E))
(void) Visit(E);
const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
@@ -1011,11 +1105,30 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
-
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex:
case CK_ConstructorConversion:
- assert(0 && "Should be unreachable!");
+ case CK_ToUnion:
+ llvm_unreachable("scalar cast to non-scalar value");
break;
+ case CK_GetObjCProperty: {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
+ "CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
+ RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
+ return RV.getScalarVal();
+ }
+
+ case CK_LValueToRValue:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
+ return Visit(const_cast<Expr*>(E));
+
case CK_IntegralToPointer: {
Value *Src = Visit(const_cast<Expr*>(E));
@@ -1038,10 +1151,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
}
case CK_ToVoid: {
- if (E->Classify(CGF.getContext()).isGLValue())
- CGF.EmitLValue(E);
- else
- CGF.EmitAnyExpr(E, 0, false, true);
+ CGF.EmitIgnoredExpr(E);
return 0;
}
case CK_VectorSplat: {
@@ -1056,62 +1166,55 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Int32Ty, 0);
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ Args.push_back(Zero);
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
+
case CK_IntegralCast:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+ case CK_IntegralToBoolean:
+ return EmitIntToBoolConversion(Visit(E));
+ case CK_PointerToBoolean:
+ return EmitPointerToBoolConversion(Visit(E));
+ case CK_FloatingToBoolean:
+ return EmitFloatToBoolConversion(Visit(E));
case CK_MemberPointerToBoolean: {
llvm::Value *MemPtr = Visit(E);
const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
}
- }
-
- // Handle cases where the source is an non-complex type.
- if (!CGF.hasAggregateLLVMType(E->getType())) {
- Value *Src = Visit(const_cast<Expr*>(E));
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ return CGF.EmitComplexExpr(E, false, true).first;
- // Use EmitScalarConversion to perform the conversion.
- return EmitScalarConversion(Src, E->getType(), DestTy);
- }
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
- if (E->getType()->isAnyComplexType()) {
- // Handle cases where the source is a complex type.
- bool IgnoreImag = true;
- bool IgnoreImagAssign = true;
- bool IgnoreReal = IgnoreResultAssign;
- bool IgnoreRealAssign = IgnoreResultAssign;
- if (DestTy->isBooleanType())
- IgnoreImagAssign = IgnoreImag = false;
- else if (DestTy->isVoidType()) {
- IgnoreReal = IgnoreImag = false;
- IgnoreRealAssign = IgnoreImagAssign = true;
- }
- CodeGenFunction::ComplexPairTy V
- = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
- IgnoreImagAssign);
+ // TODO: kill this function off, inline appropriate case here
return EmitComplexToScalarConversion(V, E->getType(), DestTy);
}
- // Okay, this is a cast from an aggregate. It must be a cast to void. Just
- // evaluate the result and return.
- CGF.EmitAggExpr(E, 0, false, true);
+ }
+
+ llvm_unreachable("unknown scalar cast");
return 0;
}
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
- return CGF.EmitCompoundStmt(*E->getSubStmt(),
- !E->getType()->isVoidType()).getScalarVal();
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
+ .getScalarVal();
}
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
@@ -1126,108 +1229,147 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
//===----------------------------------------------------------------------===//
llvm::Value *ScalarExprEmitter::
-EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
- bool isInc, bool isPre) {
-
- QualType ValTy = E->getSubExpr()->getType();
- llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);
-
- int AmountVal = isInc ? 1 : -1;
-
- if (ValTy->isPointerType() &&
- ValTy->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition/subtraction needs to account for the VLA size
- CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal, bool IsInc) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ break;
}
+ assert(false && "Unknown SignedOverflowBehaviorTy");
+ return 0;
+}
+
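EmitAddConsiderOverflowBehavior picks the IR form for each signed-overflow mode; in source terms the same increment behaves differently under each flag (a sketch only, since the default mode is undefined behavior):

    #include <climits>
    #include <cstdio>

    int main() {
      int i = INT_MAX;
      ++i; // default: undefined ('add nsw'); -fwrapv: wraps to INT_MIN; -ftrapv: traps
      std::printf("%d\n", i);
      return 0;
    }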
+llvm::Value *
+ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType type = E->getSubExpr()->getType();
+ llvm::Value *value = EmitLoadOfLValue(LV, type);
+ llvm::Value *input = value;
+
+ int amount = (isInc ? 1 : -1);
+
+ // Special case of integer increment that we have to check first: bool++.
+ // Due to promotion rules, we get:
+ // bool++ -> bool = bool + 1
+ // -> bool = (int)bool + 1
+ // -> bool = ((int)bool + 1 != 0)
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
+ if (isInc && type->isBooleanType()) {
+ value = Builder.getTrue();
+
+ // Most common case by far: integer increment.
+ } else if (type->isIntegerType()) {
+
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ if (type->isSignedIntegerType())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+
+ // Unsigned integer inc is always two's complement.
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
- llvm::Value *NextVal;
- if (const llvm::PointerType *PT =
- dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
- if (!isa<llvm::FunctionType>(PT->getElementType())) {
- QualType PTEE = ValTy->getPointeeType();
- if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
- // Handle interface types, which are not represented with a concrete
- // type.
- int size = CGF.getContext().getTypeSize(OIT) / 8;
- if (!isInc)
- size = -size;
- Inc = llvm::ConstantInt::get(Inc->getType(), size);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- InVal = Builder.CreateBitCast(InVal, i8Ty);
- NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
- llvm::Value *lhs = LV.getAddress();
- lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = CGF.MakeAddrLValue(lhs, ValTy);
- } else
- NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ // Next most common: pointer increment.
+ } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType type = ptr->getPointeeType();
+
+ // VLA types don't have constant size.
+ if (type->isVariableArrayType()) {
+ llvm::Value *vlaSize =
+ CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
+ value = CGF.EmitCastToVoidPtr(value);
+ if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
+ value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // Arithmetic on function pointers (!) is just +-1.
+ } else if (type->isFunctionType()) {
+ llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
+
+ value = CGF.EmitCastToVoidPtr(value);
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // For everything else, we can just do a simple increment.
} else {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
- NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
- NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
}
- } else if (InVal->getType()->isIntegerTy(1) && isInc) {
- // Bool++ is an interesting case, due to promotion rules, we get:
- // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
- // Bool = ((int)Bool+1) != 0
- // An interesting aspect of this is that increment is always true.
- // Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue(VMContext);
- } else if (isa<llvm::IntegerType>(InVal->getType())) {
- NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
-
- if (!ValTy->isSignedIntegerType())
- // Unsigned integer inc is always two's complement.
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- else {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
- break;
- case LangOptions::SOB_Defined:
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- break;
- case LangOptions::SOB_Trapping:
- BinOpInfo BinOp;
- BinOp.LHS = InVal;
- BinOp.RHS = NextVal;
- BinOp.Ty = E->getType();
- BinOp.Opcode = BO_Add;
- BinOp.E = E;
- NextVal = EmitOverflowCheckedBinOp(BinOp);
- break;
- }
+
+ // Vector increment/decrement.
+ } else if (type->isVectorType()) {
+ if (type->hasIntegerRepresentation()) {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ if (type->hasSignedIntegerRepresentation())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ } else {
+ value = Builder.CreateFAdd(
+ value,
+ llvm::ConstantFP::get(value->getType(), amount),
+ isInc ? "inc" : "dec");
}
- } else {
+
+ // Floating point.
+ } else if (type->isRealFloatingType()) {
// Add the inc/dec to the real part.
- if (InVal->getType()->isFloatTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType()->isDoubleTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<double>(AmountVal)));
+ llvm::Value *amt;
+ if (value->getType()->isFloatTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(amount)));
+ else if (value->getType()->isDoubleTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(amount)));
else {
- llvm::APFloat F(static_cast<float>(AmountVal));
+ llvm::APFloat F(static_cast<float>(amount));
bool ignored;
F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
&ignored);
- NextVal = llvm::ConstantFP::get(VMContext, F);
+ amt = llvm::ConstantFP::get(VMContext, F);
}
- NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+
+ // Objective-C pointer types.
+ } else {
+ const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
+ value = CGF.EmitCastToVoidPtr(value);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
+ if (!isInc) size = -size;
+ llvm::Value *sizeValue =
+ llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
+
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateBitCast(value, input->getType());
}
// Store the updated result through the lvalue.
if (LV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, type, &value);
else
- CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV, type);
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
- return isPre ? NextVal : InVal;
+ return isPre ? value : input;
}
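
For reference, the rewritten EmitScalarPrePostIncDec above dispatches on the
AST type of the operand rather than on the LLVM value type. A minimal C-level
sketch of the cases it distinguishes (illustrative only, not part of the patch):

    void examples(int n) {
      _Bool b = 0; ++b;          /* bool: the increment always stores true   */
      int i = 0; i++;            /* signed: nsw add, plain add, or a checked */
                                 /* add, per getSignedOverflowBehavior()     */
      int (*vla)[n] = 0; vla++;  /* VLA pointee: GEP scaled by runtime size  */
      int (*fp)(void) = 0; fp++; /* function pointer (GNU ext.): +/- 1 byte  */
    }
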
@@ -1346,7 +1488,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Compute the offset to the base.
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
- int64_t OffsetInt = RL.getBaseClassOffset(BaseRD) /
+ int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
CGF.getContext().getCharWidth();
Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
break;
@@ -1371,7 +1513,7 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
- CGF.EmitAnyExpr(E->getArgumentExpr());
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
return CGF.GetVLASize(VAT);
@@ -1387,21 +1529,38 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+    // Note that we have to ask E because Op might be an l-value for
+    // which this won't work, e.g. an Obj-C property.
+ if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
return Visit(Op);
}
+
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+    // Note that we have to ask E because Op might be an l-value for
+    // which this won't work, e.g. an Obj-C property.
+    if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
- if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
- CGF.EmitLValue(Op);
- else
- CGF.EmitScalarExpr(Op, true);
+ CGF.EmitScalarExpr(Op, true);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
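
A case the new gl-value path above now handles directly (a sketch; the old
code always projected through EmitComplexExpr):

    _Complex double z;
    double im = __imag z;  /* emitted as a load of the imaginary subobject
                              l-value; this also works when the base is an
                              Obj-C property */
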
@@ -1478,8 +1637,12 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LHS.isPropertyRef())
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1490,8 +1653,51 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return EmitLoadOfLValue(LHS, E->getType());
}
+void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
+ const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv) {
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *contBB =
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);
+
+ const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::Value *IntMin =
+ llvm::ConstantInt::get(VMContext,
+ llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+
+ llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
+ llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
+ llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
+ Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
+ overflowBB, contBB);
+ } else {
+ CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
+ overflowBB, contBB);
+ }
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(contBB);
+}
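
EmitUndefinedBehaviorIntegerDivAndRemCheck branches to the overflow block for
the two cases where signed division is undefined; the emitted test is roughly
equivalent to this C sketch (hypothetical helper name, not from the patch):

    #include <limits.h>
    int div_would_trap(int lhs, int rhs) {
      return rhs == 0 || (lhs == INT_MIN && rhs == -1);
    }
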
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
+ else if (Ops.Ty->isRealFloatingType()) {
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
+ CGF.CurFn);
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
+ CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
+ overflowBB, DivCont);
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(DivCont);
+ }
+ }
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -1502,6 +1708,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ }
+
if (Ops.Ty->isUnsignedIntegerType())
return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
else
@@ -1544,21 +1757,56 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
// Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
// Handle overflow with llvm.trap.
- // TODO: it would be better to generate one of these blocks per function.
+ const std::string *handlerName =
+ &CGF.getContext().getLangOptions().OverflowHandler;
+ if (handlerName->empty()) {
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(continueBB);
+ return result;
+ }
+
+ // If an overflow handler is set, then we want to call it and then use its
+ // result, if it returns.
Builder.SetInsertPoint(overflowBB);
- llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
- Builder.CreateCall(Trap);
- Builder.CreateUnreachable();
-
- // Continue on.
+
+ // Get the overflow handler.
+ const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ std::vector<const llvm::Type*> argTypes;
+ argTypes.push_back(CGF.Int64Ty); argTypes.push_back(CGF.Int64Ty);
+ argTypes.push_back(Int8Ty); argTypes.push_back(Int8Ty);
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
+ llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+
+ // Sign extend the args to 64-bit, so that we can use the same handler for
+ // all types of overflow.
+ llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
+ llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
+
+ // Call the handler with the two arguments, the operation, and the size of
+ // the result.
+ llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
+ Builder.getInt8(OpID),
+ Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ // Truncate the result back to the desired size.
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+ Builder.CreateBr(continueBB);
+
Builder.SetInsertPoint(continueBB);
- return result;
+ llvm::PHINode *phi = Builder.CreatePHI(opTy);
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
}
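
When -ftrapv-handler names a function, the call built above expects it to have
this shape; an illustrative C prototype (the parameter names are mine):

    /* Operands arrive sign-extended to 64 bits; op_id and bit_width identify
       the operation and the original type width. If the handler returns, the
       result is truncated back to that width. */
    long long overflow_handler(long long lhs, long long rhs,
                               char op_id, char bit_width);
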
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
@@ -1609,7 +1857,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
}
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.LLVMPointerWidth) {
+ if (Width < CGF.PointerWidthInBits) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
@@ -1682,7 +1930,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
// pointer - int
Value *Idx = Ops.RHS;
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.LLVMPointerWidth) {
+ if (Width < CGF.PointerWidthInBits) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
@@ -1792,6 +2040,48 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}
+enum IntrinsicType { VCMPEQ, VCMPGT };
+// Return the corresponding comparison intrinsic for the given element type.
+static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
+ BuiltinType::Kind ElemKind) {
+ switch (ElemKind) {
+ default: assert(0 && "unexpected element type");
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
+ break;
+ case BuiltinType::UShort:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
+ break;
+ case BuiltinType::Short:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
+ break;
+ case BuiltinType::Float:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
+ break;
+ }
+ return llvm::Intrinsic::not_intrinsic;
+}
+
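
A worked example of the mapping (my reading of the table above, not text from
the patch): for AltiVec vectors of signed int,

    vector signed int a, b;
    int eq = (a == b);  /* ppc_altivec_vcmpequw_p with CR6 = CR6_LT        */
    int lt = (a < b);   /* ppc_altivec_vcmpgtsw_p, vector operands swapped */
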
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
unsigned SICmpOpc, unsigned FCmpOpc) {
TestAndClearIgnoreResultAssign();
@@ -1808,6 +2098,71 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
+  // Under AltiVec, a comparison yields a numeric result, so we use
+  // intrinsics that compare whole vectors and return 0 or 1.
+ if (LHSTy->isVectorType() && CGF.getContext().getLangOptions().AltiVec) {
+    // Constants for mapping CR6 register bits to the predicate result.
+ enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
+
+ llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
+
+    // In several cases the order of the vector arguments is reversed.
+ Value *FirstVecArg = LHS,
+ *SecondVecArg = RHS;
+
+ QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
+ const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
+ BuiltinType::Kind ElementKind = BTy->getKind();
+
+ switch(E->getOpcode()) {
+ default: assert(0 && "is not a comparison operation");
+ case BO_EQ:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_NE:
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_LT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ break;
+ case BO_GT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ break;
+ case BO_LE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ }
+ break;
+ case BO_GE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ break;
+ }
+
+ Value *CR6Param = llvm::ConstantInt::get(CGF.Int32Ty, CR6);
+ llvm::Function *F = CGF.CGM.getIntrinsic(ID);
+ Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ }
+
if (LHS->getType()->isFPOrFPVectorTy()) {
Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
LHS, RHS, "cmp");
@@ -1881,8 +2236,12 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LHS.isPropertyRef())
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1913,6 +2272,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
// Branch on the LHS first. If it is false, go to the failure (cont) block.
CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
@@ -1926,10 +2287,10 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
- CGF.EndConditionalBranch();
+ eval.end(CGF);
  // Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1963,6 +2324,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
// Branch on the LHS first. If it is true, go to the success (cont) block.
CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
@@ -1976,13 +2339,13 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
// Emit the RHS condition as a bool value.
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
- CGF.EndConditionalBranch();
+ eval.end(CGF);
  // Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1997,7 +2360,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
+ CGF.EmitIgnoredExpr(E->getLHS());
CGF.EnsureInsertPoint();
return Visit(E->getRHS());
}
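
The ConditionalEvaluation RAII object introduced in the && and || emission
above matters when the right-hand side creates C++ temporaries, since their
cleanups must run only on the path that evaluated them; a sketch with a
hypothetical type:

    struct Guard { Guard(); ~Guard(); bool ok() const; };
    bool test(bool fast) {
      // Guard's destructor is emitted as a conditional cleanup: it runs
      // only when 'fast' was false and the RHS was actually evaluated.
      return fast || Guard().ok();
    }
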
@@ -2034,95 +2397,107 @@ static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
Value *ScalarExprEmitter::
-VisitConditionalOperator(const ConditionalOperator *E) {
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
TestAndClearIgnoreResultAssign();
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ Expr *condExpr = E->getCond();
+ Expr *lhsExpr = E->getTrueExpr();
+ Expr *rhsExpr = E->getFalseExpr();
+
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm.
- if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
- Expr *Live = E->getLHS(), *Dead = E->getRHS();
- if (Cond == -1)
- std::swap(Live, Dead);
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(condExpr)){
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (Cond == -1) std::swap(live, dead);
// If the dead side doesn't have labels we need, and if the Live side isn't
// the gnu missing ?: extension (which we could handle, but don't bother
// to), just emit the Live part.
- if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
- Live) // Live part isn't missing.
- return Visit(Live);
+ if (!CGF.ContainsLabel(dead))
+ return Visit(live);
}
+ // OpenCL: If the condition is a vector, we can treat this condition like
+ // the select function.
+ if (CGF.getContext().getLangOptions().OpenCL
+ && condExpr->getType()->isVectorType()) {
+ llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+
+ const llvm::Type *condType = ConvertType(condExpr->getType());
+ const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+
+ unsigned numElem = vecTy->getNumElements();
+ const llvm::Type *elemType = vecTy->getElementType();
+
+ std::vector<llvm::Constant*> Zvals;
+ for (unsigned i = 0; i < numElem; ++i)
+ Zvals.push_back(llvm::ConstantInt::get(elemType,0));
+
+ llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
+ llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::VectorType::get(elemType,
+ numElem),
+ "sext");
+ llvm::Value *tmp2 = Builder.CreateNot(tmp);
+
+ // Cast float to int to perform ANDs if necessary.
+ llvm::Value *RHSTmp = RHS;
+ llvm::Value *LHSTmp = LHS;
+ bool wasCast = false;
+ const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ if (rhsVTy->getElementType()->isFloatTy()) {
+ RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
+ LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
+ wasCast = true;
+ }
+
+ llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
+ llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
+ llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
+ if (wasCast)
+ tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
+ return tmp5;
+ }
+
// If this is a really simple expression (like x ? 4 : 5), emit this as a
// select instead of as control flow. We can only do this if it is cheap and
// safe to evaluate the LHS and RHS unconditionally.
- if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
- CGF) &&
- isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
- llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
- llvm::Value *LHS = Visit(E->getLHS());
- llvm::Value *RHS = Visit(E->getRHS());
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
- if (!E->getLHS() && CGF.getContext().getLangOptions().CPlusPlus) {
- // Does not support GNU missing condition extension in C++ yet (see #7726)
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- return llvm::UndefValue::get(ConvertType(E->getType()));
- }
-
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
- Value *CondVal = 0;
-
- // If we don't have the GNU missing condition extension, emit a branch on bool
- // the normal way.
- if (E->getLHS()) {
- // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
- // the branch on bool.
- CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
- } else {
- // Otherwise, for the ?: extension, evaluate the conditional and then
- // convert it to bool the hard way. We do this explicitly because we need
- // the unconverted value for the missing middle value of the ?:.
- CondVal = CGF.EmitScalarExpr(E->getCond());
-
- // In some cases, EmitScalarConversion will delete the "CondVal" expression
- // if there are no extra uses (an optimization). Inhibit this by making an
- // extra dead use, because we're going to add a use of CondVal later. We
- // don't use the builder for this, because we don't want it to get optimized
- // away. This leaves dead code, but the ?: extension isn't common.
- new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
- Builder.GetInsertBlock());
-
- Value *CondBoolVal =
- CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
- CGF.getContext().BoolTy);
- Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
- }
-
- CGF.BeginConditionalBranch();
- CGF.EmitBlock(LHSBlock);
- // Handle the GNU extension for missing LHS.
- Value *LHS;
- if (E->getLHS())
- LHS = Visit(E->getLHS());
- else // Perform promotions, to handle cases like "short ?: int"
- LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+ eval.begin(CGF);
+ Value *LHS = Visit(lhsExpr);
+ eval.end(CGF);
- CGF.EndConditionalBranch();
LHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
+ Builder.CreateBr(ContBlock);
- CGF.BeginConditionalBranch();
CGF.EmitBlock(RHSBlock);
+ eval.begin(CGF);
+ Value *RHS = Visit(rhsExpr);
+ eval.end(CGF);
- Value *RHS = Visit(E->getRHS());
- CGF.EndConditionalBranch();
RHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
-
CGF.EmitBlock(ContBlock);
// If the LHS or RHS is a throw expression, it will be legitimately null.
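
The OpenCL vector ?: path above selects without branching; per element it
computes the equivalent of this scalar C sketch of the sign-test/AND/OR
sequence:

    int select_lane(int cond, int lhs, int rhs) {
      int mask = -(cond < 0);              /* sext of the MSB test         */
      return (lhs & mask) | (rhs & ~mask); /* lhs where set, rhs elsewhere */
    }
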
@@ -2155,8 +2530,8 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return Builder.CreateLoad(ArgPtr);
}
-Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
- return CGF.BuildBlockLiteralTmp(BE);
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
+ return CGF.EmitBlockLiteral(block);
}
//===----------------------------------------------------------------------===//
@@ -2209,7 +2584,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
const llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
- if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+ if (BaseExpr->isRValue()) {
V = CreateTempAlloca(ClassPtrTy, "resval");
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
@@ -2229,7 +2604,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
}
-LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
const CompoundAssignOperator *E) {
ScalarExprEmitter Scalar(*this);
Value *Result = 0;
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 6a6d63df8f52..08c458bd52d3 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -137,9 +138,49 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
E = OMD->param_end(); PI != E; ++PI)
Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+ CurGD = OMD;
+
StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocStart());
}
+void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
+ bool IsAtomic, bool IsStrong) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetGetStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList Args;
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
+ Size.getQuantity());
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *isAtomic =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsAtomic ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(isAtomic),
+ getContext().BoolTy));
+ llvm::Value *hasStrong =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsStrong ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(hasStrong),
+ getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+}
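
Schematically, GenerateObjCGetterBody emits the equivalent of the following
call (pseudo-C; the argument order matches the CallArgList built above):

    objc_copyStruct(ReturnValue, &self->structIvar,
                    sizeof(structIvar), isAtomic, hasStrong);
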
+
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
@@ -211,51 +252,30 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
} else {
- if (Ivar->getType()->isAnyComplexType()) {
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ QualType IVART = Ivar->getType();
+ if (IsAtomic &&
+ IVART->isScalarType() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
+ }
+ else if (IVART->isAnyComplexType()) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
LV.isVolatileQualified());
StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
}
- else if (hasAggregateLLVMType(Ivar->getType())) {
+ else if (hasAggregateLLVMType(IVART)) {
bool IsStrong = false;
- if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
+ if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(IVART)))
&& CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
- && CGM.getObjCRuntime().GetCopyStructFunction()) {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetCopyStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- // objc_copyStruct (ReturnValue, &structIvar,
- // sizeof (Type of Ivar), isAtomic, false);
- CallArgList Args;
- RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- // sizeof (Type of Ivar)
- uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
- llvm::Value *isAtomic =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsAtomic ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(isAtomic),
- getContext().BoolTy));
- llvm::Value *hasStrong =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsStrong ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(hasStrong),
- getContext().BoolTy));
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
+ && CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, IsAtomic, IsStrong);
}
else {
if (PID->getGetterCXXConstructor()) {
@@ -268,23 +288,61 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), IVART);
}
}
- } else {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ }
+ else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, IVART);
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
}
}
FinishFunction();
}
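
The new arm/thumb special case above (and its counterpart in the setter below)
reroutes atomic accesses to scalar ivars wider than 4 bytes through the
struct-copy helper, since a plain load or store would not be atomic there.
For example (an illustrative declaration, not from the patch):

    @property (atomic) long long value;  // 8 bytes on ARM: the synthesized
                                         // accessors fall back to
                                         // objc_copyStruct()
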
+void CodeGenFunction::GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
+ ObjCIvarDecl *Ivar) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetSetStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ CallArgList Args;
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ RValue RV =
+ RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsPtrTy =
+ Builder.CreateBitCast(Arg,
+ Types.ConvertType(getContext().VoidPtrTy));
+ RV = RValue::get(ArgAsPtrTy);
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
+ Size.getQuantity());
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+}
+
/// GenerateObjCSetter - Generate an Objective-C property setter
/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
/// is illegal within a category.
@@ -353,66 +411,49 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
} else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
!Ivar->getType()->isAnyComplexType() &&
IndirectObjCSetterArg(*CurFnInfo)
- && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ && CGM.getObjCRuntime().GetSetStructFunction()) {
// objc_copyStruct (&structIvar, &Arg,
// sizeof (struct something), true, false);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetCopyStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- CallArgList Args;
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
- RValue RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsPtrTy =
- Builder.CreateBitCast(Arg,
- Types.ConvertType(getContext().VoidPtrTy));
- RV = RValue::get(ArgAsPtrTy);
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- // sizeof (Type of Ivar)
- uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
- llvm::Value *True =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
- llvm::Value *False =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
- Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
} else if (PID->getSetterCXXAssignment()) {
- EmitAnyExpr(PID->getSetterCXXAssignment(), (llvm::Value *)0, false, true,
- false);
-
+ EmitIgnoredExpr(PID->getSetterCXXAssignment());
} else {
- // FIXME: Find a clean way to avoid AST node creation.
- SourceLocation Loc = PD->getLocation();
- ValueDecl *Self = OMD->getSelfDecl();
- ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
- DeclRefExpr Base(Self, Self->getType(), Loc);
- ParmVarDecl *ArgDecl = *OMD->param_begin();
- DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
- ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ QualType IVART = Ivar->getType();
+ if (IsAtomic &&
+ IVART->isScalarType() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+      CGM.getObjCRuntime().GetSetStructFunction()) {
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
+ }
+ else {
+ // FIXME: Find a clean way to avoid AST node creation.
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), VK_RValue, Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), VK_LValue, Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
- // The property type can differ from the ivar type in some situations with
- // Objective-C pointer types, we can always bit cast the RHS in these cases.
- if (getContext().getCanonicalType(Ivar->getType()) !=
- getContext().getCanonicalType(ArgDecl->getType())) {
- ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
- Ivar->getType(), CK_BitCast, &Arg,
- VK_RValue);
- BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
- Ivar->getType(), Loc);
- EmitStmt(&Assign);
- } else {
- BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
- Ivar->getType(), Loc);
- EmitStmt(&Assign);
+      // The property type can differ from the ivar type in some situations
+      // with Objective-C pointer types; we can always bit-cast the RHS in
+      // these cases.
+ if (getContext().getCanonicalType(Ivar->getType()) !=
+ getContext().getCanonicalType(ArgDecl->getType())) {
+ ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
+ Ivar->getType(), CK_BitCast, &Arg,
+ VK_RValue);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
+ Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
+ EmitStmt(&Assign);
+ } else {
+ BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
+ Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
+ EmitStmt(&Assign);
+ }
}
}
@@ -422,24 +463,22 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
- llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> IvarInitializers;
+ llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
StartObjCMethod(MD, IMP->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
- CXXBaseOrMemberInitializer *Member = (*B);
+ CXXCtorInitializer *Member = (*B);
IvarInitializers.push_back(Member);
}
if (ctor) {
for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
- CXXBaseOrMemberInitializer *IvarInit = IvarInitializers[I];
- FieldDecl *Field = IvarInit->getMember();
- QualType FieldType = Field->getType();
+ CXXCtorInitializer *IvarInit = IvarInitializers[I];
+ FieldDecl *Field = IvarInit->getAnyMember();
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
- EmitAggExpr(IvarInit->getInit(), LV.getAddress(),
- LV.isVolatileQualified(), false, true);
+ EmitAggExpr(IvarInit->getInit(), AggValueSlot::forLValue(LV, true));
}
// constructor returns 'self'.
CodeGenTypes &Types = CGM.getTypes();
@@ -450,7 +489,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
} else {
// dtor
for (size_t i = IvarInitializers.size(); i > 0; --i) {
- FieldDecl *Field = IvarInitializers[i - 1]->getMember();
+ FieldDecl *Field = IvarInitializers[i - 1]->getAnyMember();
QualType FieldType = Field->getType();
const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
@@ -511,138 +550,128 @@ QualType CodeGenFunction::TypeOfSelfObject() {
return PTy->getPointeeType();
}
-RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
- const Selector &S,
- ReturnValueSlot Return) {
- llvm::Value *Receiver = LoadObjCSelf();
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+
+ // For certain base kinds, we need to emit the base immediately.
+ llvm::Value *Base;
+ if (E->isSuperReceiver())
+ Base = LoadObjCSelf();
+ else if (E->isClassReceiver())
+ Base = CGM.getObjCRuntime().GetClass(Builder, E->getClassReceiver());
+ else
+ Base = EmitScalarExpr(E->getBase());
+ return LValue::MakePropertyRef(E, Base);
+}
+
+static RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector S,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs) {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CGF.CurFuncDecl);
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
- Return,
- Exp->getType(),
- S,
- OMD->getClassInterface(),
- isCategoryImpl,
- Receiver,
- isClassMessage,
- CallArgList());
-
+ return CGF.CGM.getObjCRuntime()
+ .GenerateMessageSendSuper(CGF, Return, ResultType,
+ S, OMD->getClassInterface(),
+ isCategoryImpl, Receiver,
+ isClassMessage, CallArgs);
}
-RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp,
- ReturnValueSlot Return) {
- Exp = Exp->IgnoreParens();
- // FIXME: Split it into two separate routines.
- if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
- Selector S = E->getProperty()->getGetterName();
- if (isa<ObjCSuperExpr>(E->getBase()))
- return EmitObjCSuperPropertyGet(E, S, Return);
- return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, Exp->getType(), S,
- EmitScalarExpr(E->getBase()),
- CallArgList());
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ ReturnValueSlot Return) {
+ const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
+ QualType ResultType;
+ Selector S;
+ if (E->isExplicitProperty()) {
+ const ObjCPropertyDecl *Property = E->getExplicitProperty();
+ S = Property->getGetterName();
+ ResultType = E->getType();
} else {
- const ObjCImplicitSetterGetterRefExpr *KE =
- cast<ObjCImplicitSetterGetterRefExpr>(Exp);
- Selector S = KE->getGetterMethod()->getSelector();
- llvm::Value *Receiver;
- if (KE->getInterfaceDecl()) {
- const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
- Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- } else if (isa<ObjCSuperExpr>(KE->getBase()))
- return EmitObjCSuperPropertyGet(KE, S, Return);
- else
- Receiver = EmitScalarExpr(KE->getBase());
- return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, Exp->getType(), S,
- Receiver,
- CallArgList(), KE->getInterfaceDecl());
+ const ObjCMethodDecl *Getter = E->getImplicitPropertyGetter();
+ S = Getter->getSelector();
+    ResultType = Getter->getResultType(); // note: may be a reference type
}
+
+ llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
+
+ // Accesses to 'super' follow a different code path.
+ if (E->isSuperReceiver())
+ return GenerateMessageSendSuper(*this, Return, ResultType,
+ S, Receiver, CallArgList());
+
+ const ObjCInterfaceDecl *ReceiverClass
+ = (E->isClassReceiver() ? E->getClassReceiver() : 0);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass);
}
-void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
- const Selector &S,
- RValue Src) {
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst) {
+ const ObjCPropertyRefExpr *E = Dst.getPropertyRefExpr();
+ Selector S = E->getSetterSelector();
+ QualType ArgType;
+ if (E->isImplicitProperty()) {
+ const ObjCMethodDecl *Setter = E->getImplicitPropertySetter();
+ ObjCMethodDecl::param_iterator P = Setter->param_begin();
+ ArgType = (*P)->getType();
+ } else {
+ ArgType = E->getType();
+ }
+  // FIXME: Other than for scalars, the AST is not adequate for setter and
+  // getter type mismatches that require conversion.
+ if (Src.isScalar()) {
+ llvm::Value *SrcVal = Src.getScalarVal();
+ QualType DstType = getContext().getCanonicalType(ArgType);
+ const llvm::Type *DstTy = ConvertType(DstType);
+ if (SrcVal->getType() != DstTy)
+ Src =
+ RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
+ }
+
CallArgList Args;
- llvm::Value *Receiver = LoadObjCSelf();
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
- bool isClassMessage = OMD->isClassMethod();
- bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- Args.push_back(std::make_pair(Src, Exp->getType()));
- CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
- ReturnValueSlot(),
- getContext().VoidTy,
- S,
- OMD->getClassInterface(),
- isCategoryImpl,
- Receiver,
- isClassMessage,
- Args);
- return;
-}
+ Args.push_back(std::make_pair(Src, ArgType));
-void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
- RValue Src) {
- // FIXME: Split it into two separate routines.
- if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
- Selector S = E->getProperty()->getSetterName();
- if (isa<ObjCSuperExpr>(E->getBase())) {
- EmitObjCSuperPropertySet(E, S, Src);
- return;
- }
- CallArgList Args;
- Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- getContext().VoidTy, S,
- EmitScalarExpr(E->getBase()),
- Args);
- } else if (const ObjCImplicitSetterGetterRefExpr *E =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
- const ObjCMethodDecl *SetterMD = E->getSetterMethod();
- Selector S = SetterMD->getSelector();
- CallArgList Args;
- llvm::Value *Receiver;
- if (E->getInterfaceDecl()) {
- const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
- Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- } else if (isa<ObjCSuperExpr>(E->getBase())) {
- EmitObjCSuperPropertySet(E, S, Src);
- return;
- } else
- Receiver = EmitScalarExpr(E->getBase());
- ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
- Args.push_back(std::make_pair(Src, (*P)->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- getContext().VoidTy, S,
- Receiver,
- Args, E->getInterfaceDecl());
- } else
- assert (0 && "bad expression node in EmitObjCPropertySet");
+ llvm::Value *Receiver = Dst.getPropertyRefBaseAddr();
+ QualType ResultType = getContext().VoidTy;
+
+ if (E->isSuperReceiver()) {
+ GenerateMessageSendSuper(*this, ReturnValueSlot(),
+ ResultType, S, Receiver, Args);
+ return;
+ }
+
+ const ObjCInterfaceDecl *ReceiverClass
+ = (E->isClassReceiver() ? E->getClassReceiver() : 0);
+
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, S, Receiver, Args,
+ ReceiverClass);
}
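
Together with EmitObjCPropertyRefLValue above, these two functions make a
property access a special l-value whose loads and stores become message
sends; conceptually (a sketch):

    x = obj.prop;    // load:  [obj prop]        (getter selector)
    obj.prop = x;    // store: [obj setProp:x]   (setter selector)
    super.prop = x;  // store via GenerateMessageSendSuper
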
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
- llvm::Value *DeclAddress;
- QualType ElementTy;
if (!EnumerationMutationFn) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
}
- if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
- EmitStmt(SD);
- assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
- const Decl* D = SD->getSingleDecl();
- ElementTy = cast<ValueDecl>(D)->getType();
- DeclAddress = LocalDeclMap[D];
- } else {
- ElementTy = cast<Expr>(S.getElement())->getType();
- DeclAddress = 0;
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(Builder);
}
+ JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
+
// Fast enumeration state.
QualType StateTy = getContext().getObjCFastEnumerationStateType();
llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
@@ -651,7 +680,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Number of elements in the items array.
static const unsigned NumItems = 16;
- // Get selector
+ // Fetch the countByEnumeratingWithState:objects:count: selector.
IdentifierInfo *II[] = {
&CGM.getContext().Idents.get("countByEnumeratingWithState"),
&CGM.getContext().Idents.get("objects"),
@@ -666,79 +695,92 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+ // Emit the collection pointer.
llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+ // Send it our message:
CallArgList Args;
+
+ // The first argument is a temporary of the enumeration-state type.
Args.push_back(std::make_pair(RValue::get(StatePtr),
getContext().getPointerType(StateTy)));
+ // The second argument is a temporary array with space for NumItems
+ // pointers. We'll actually be loading elements from the array
+ // pointer written into the control state; this buffer is so that
+ // collections that *aren't* backed by arrays can still queue up
+ // batches of elements.
Args.push_back(std::make_pair(RValue::get(ItemsPtr),
getContext().getPointerType(ItemsTy)));
+ // The third argument is the capacity of that temporary array.
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
Args.push_back(std::make_pair(RValue::get(Count),
getContext().UnsignedLongTy));
+ // Start the enumeration.
RValue CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
- llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
- "limit.ptr");
- Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ // The initial number of objects that were returned in the buffer.
+ llvm::Value *initialBufferLimit = CountRV.getScalarVal();
- llvm::BasicBlock *NoElements = createBasicBlock("noelements");
- llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+ llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
+ llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
- llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
- llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+ llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
- llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
- Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+ // If the limit pointer was zero to begin with, the collection is
+ // empty; skip all this.
+ Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
+ EmptyBB, LoopInitBB);
- EmitBlock(SetStartMutations);
-
- llvm::Value *StartMutationsPtr = CreateMemTemp(getContext().UnsignedLongTy);
+ // Otherwise, initialize the loop.
+ EmitBlock(LoopInitBB);
+ // Save the initial mutations value. This is the value at an
+ // address that was written into the state object by
+ // countByEnumeratingWithState:objects:count:.
llvm::Value *StateMutationsPtrPtr =
Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
"mutationsptr");
- llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
- "mutations");
-
- Builder.CreateStore(StateMutations, StartMutationsPtr);
+ llvm::Value *initialMutations =
+ Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
- llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
- EmitBlock(LoopStart);
+ // Start looping. This is the point we return to whenever we have a
+ // fresh, non-empty batch of objects.
+ llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
+ EmitBlock(LoopBodyBB);
- llvm::Value *CounterPtr = CreateMemTemp(getContext().UnsignedLongTy,
- "counter.ptr");
- Builder.CreateStore(Zero, CounterPtr);
+ // The current index into the buffer.
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, "forcoll.index");
+ index->addIncoming(zero, LoopInitBB);
- llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
- EmitBlock(LoopBody);
+ // The current buffer size.
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, "forcoll.count");
+ count->addIncoming(initialBufferLimit, LoopInitBB);
+ // Check whether the mutations value has changed from where it was
+ // at start. StateMutationsPtr should actually be invariant between
+ // refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
- StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
-
- llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
- "mutations");
- llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
- StartMutations,
- "tobool");
+ llvm::Value *currentMutations
+ = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+ llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
+  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
- llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
- llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+ Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
+ WasNotMutatedBB, WasMutatedBB);
- Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
-
- EmitBlock(WasMutated);
+ // If so, call the enumeration-mutation function.
+ EmitBlock(WasMutatedBB);
llvm::Value *V =
Builder.CreateBitCast(Collection,
ConvertType(getContext().getObjCIdType()),
@@ -752,81 +794,114 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
FunctionType::ExtInfo()),
EnumerationMutationFn, ReturnValueSlot(), Args2);
- EmitBlock(WasNotMutated);
+ // Otherwise, or if the mutation function returns, just continue.
+ EmitBlock(WasNotMutatedBB);
- llvm::Value *StateItemsPtr =
- Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ // Initialize the element variable.
+ RunCleanupsScope elementVariableScope(*this);
+ bool elementIsDecl;
+ LValue elementLValue;
+ QualType elementType;
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
- llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), D->getType(),
+ VK_LValue, SourceLocation());
+ elementLValue = EmitLValue(&tempDRE);
+ elementType = D->getType();
+ elementIsDecl = true;
+ } else {
+ elementLValue = LValue(); // suppress warning
+ elementType = cast<Expr>(S.getElement())->getType();
+ elementIsDecl = false;
+ }
+ const llvm::Type *convertedElementType = ConvertType(elementType);
- llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
- "stateitems");
+ // Fetch the buffer out of the enumeration state.
+ // TODO: this pointer should actually be invariant between
+ // refreshes, which would help us do certain loop optimizations.
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ llvm::Value *EnumStateItems =
+ Builder.CreateLoad(StateItemsPtr, "stateitems");
+ // Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr =
- Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
-
- llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
-
- // Cast the item to the right type.
- CurrentItem = Builder.CreateBitCast(CurrentItem,
- ConvertType(ElementTy), "tmp");
+ Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
- if (!DeclAddress) {
- LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+ // Cast that value to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
+ "currentitem");
- // Set the value to null.
- Builder.CreateStore(CurrentItem, LV.getAddress());
- } else
- Builder.CreateStore(CurrentItem, DeclAddress);
+ // Make sure we have an l-value. Yes, this gets evaluated every
+ // time through the loop.
+ if (!elementIsDecl)
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
- // Increment the counter.
- Counter = Builder.CreateAdd(Counter,
- llvm::ConstantInt::get(UnsignedLongLTy, 1));
- Builder.CreateStore(Counter, CounterPtr);
-
- JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
- JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, elementType);
+ // Perform the loop body, setting up break and continue labels.
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
-
- EmitStmt(S.getBody());
-
+ {
+ RunCleanupsScope Scope(*this);
+ EmitStmt(S.getBody());
+ }
BreakContinueStack.pop_back();
+ // Destroy the element variable now.
+ elementVariableScope.ForceCleanup();
+
+ // Check whether there are more elements.
EmitBlock(AfterBody.getBlock());
- llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+ llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
+
+ // First we check in the local buffer.
+ llvm::Value *indexPlusOne
+ = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
+
+ // If we haven't overrun the buffer yet, we can continue.
+ Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
+ LoopBodyBB, FetchMoreBB);
- Counter = Builder.CreateLoad(CounterPtr);
- Limit = Builder.CreateLoad(LimitPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
- Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+ index->addIncoming(indexPlusOne, AfterBody.getBlock());
+ count->addIncoming(count, AfterBody.getBlock());
- // Fetch more elements.
- EmitBlock(FetchMore);
+ // Otherwise, we have to fetch more elements.
+ EmitBlock(FetchMoreBB);
CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
- Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
- Limit = Builder.CreateLoad(LimitPtr);
- IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
- Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+ // If we got a zero count, we're done.
+ llvm::Value *refetchCount = CountRV.getScalarVal();
+
+ // (note that the message send might split FetchMoreBB)
+ index->addIncoming(zero, Builder.GetInsertBlock());
+ count->addIncoming(refetchCount, Builder.GetInsertBlock());
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
+ EmptyBB, LoopBodyBB);
// No more elements.
- EmitBlock(NoElements);
+ EmitBlock(EmptyBB);
- if (!DeclAddress) {
+ if (!elementIsDecl) {
// If the element was not a declaration, set it to be null.
- LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+ llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(null), elementLValue, elementType);
+ }
- // Set the value to null.
- Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
- LV.getAddress());
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getEnd());
+ DI->EmitRegionEnd(Builder);
}
EmitBlock(LoopEnd.getBlock());
@@ -846,5 +921,3 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
}
CGObjCRuntime::~CGObjCRuntime() {}
-
-
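
The rewrite above turns the enumeration state into SSA values: the buffer index and count become PHI nodes fed from the loop-init, in-buffer, and refetch edges, instead of being spilled to and reloaded from stack temporaries on every iteration. A rough, purely illustrative C++ model of the control flow the new code emits (Source and refetch are hypothetical stand-ins for the NSFastEnumeration state and the countByEnumeratingWithState:objects:count: send; none of these names come from the patch):

#include <cstddef>

struct Source {
  unsigned long mutations;                           // models *state.mutationsPtr
  virtual unsigned long refetch(void **buf, unsigned long cap) = 0;
  virtual ~Source() {}
};

template <typename Body>
void forCollection(Source &src, Body body) {
  void *buffer[16];
  unsigned long initialMutations = src.mutations;    // forcoll.loopinit
  unsigned long count = src.refetch(buffer, 16);
  if (count == 0) return;                            // forcoll.empty
  unsigned long index = 0;                           // the forcoll.index PHI
  for (;;) {                                         // forcoll.loopbody
    if (src.mutations != initialMutations) {         // forcoll.mutated
      /* call the enumeration-mutation function, then continue */
    }
    body(buffer[index]);                             // forcoll.notmutated
    if (index + 1 < count) { ++index; continue; }    // still inside the buffer
    count = src.refetch(buffer, 16);                 // forcoll.refetch
    if (count == 0) return;                          // no more elements
    index = 0;                                       // fresh buffer: both PHIs reset
  }
}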
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index d7960beb1d1d..d481e7792674 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -17,7 +17,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
+#include "CGCleanup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -62,7 +62,10 @@ private:
const llvm::IntegerType *IntTy;
const llvm::PointerType *PtrTy;
const llvm::IntegerType *LongTy;
+ const llvm::IntegerType *SizeTy;
+ const llvm::IntegerType *PtrDiffTy;
const llvm::PointerType *PtrToIntTy;
+ const llvm::Type *BoolTy;
llvm::GlobalAlias *ClassPtrAlias;
llvm::GlobalAlias *MetaClassPtrAlias;
std::vector<llvm::Constant*> Classes;
@@ -179,7 +182,8 @@ public:
virtual llvm::Function *ModuleInitFunction();
virtual llvm::Function *GetPropertyGetFunction();
virtual llvm::Function *GetPropertySetFunction();
- virtual llvm::Function *GetCopyStructFunction();
+ virtual llvm::Function *GetSetStructFunction();
+ virtual llvm::Function *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -212,8 +216,8 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) {
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
return NULLPtr;
}
};
@@ -273,6 +277,11 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+ SizeTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getSizeType()));
+ PtrDiffTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()));
+ BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Int8Ty = llvm::Type::getInt8Ty(VMContext);
// C string type. Used in lots of places.
@@ -318,8 +327,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
// id objc_assign_ivar(id, id, ptrdiff_t);
std::vector<const llvm::Type*> Args(1, IdTy);
Args.push_back(PtrToIdTy);
- // FIXME: ptrdiff_t
- Args.push_back(LongTy);
+ Args.push_back(PtrDiffTy);
llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
// id objc_assign_strongCast (id, id*)
@@ -342,8 +350,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
Args.clear();
Args.push_back(PtrToInt8Ty);
Args.push_back(PtrToInt8Ty);
- // FIXME: size_t
- Args.push_back(LongTy);
+ Args.push_back(SizeTy);
FTy = llvm::FunctionType::get(IdTy, Args, false);
MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
}
@@ -435,7 +442,7 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name);
+ linkage, C, Name);
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
@@ -443,7 +450,7 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name);
+ linkage, C, Name);
}
/// Generate an NSConstantString object.
@@ -995,7 +1002,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
PtrTy, // Should be a recursive pointer, but it's always NULL here.
- LongTy,//FIXME: Should be size_t
+ SizeTy,
ProtocolArrayTy,
NULL);
std::vector<llvm::Constant*> Elements;
@@ -1250,7 +1257,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
ExistingProtocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
PtrTy, // Should be a recursive pointer, but it's always NULL here.
- LongTy,//FIXME: Should be size_t
+ SizeTy,
ProtocolArrayTy,
NULL);
std::vector<llvm::Constant*> ProtocolElements;
@@ -1430,7 +1437,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
}
// Get the size of instances.
- int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+ int instanceSize =
+ Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
// Collect information about instance variables.
llvm::SmallVector<llvm::Constant*, 16> IvarNames;
@@ -1440,7 +1448,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
std::vector<llvm::Constant*> IvarOffsetValues;
int superInstanceSize = !SuperClassDecl ? 0 :
- Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
@@ -1469,7 +1477,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::ExternalLinkage,
- llvm::ConstantInt::get(IntTy, BaseOffset),
+ llvm::ConstantInt::get(IntTy, Offset),
"__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString()));
}
@@ -1538,7 +1546,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// allows code compiled for the non-Fragile ABI to inherit from code compiled
// for the legacy ABI, without causing problems. The converse is also
// possible, but causes all ivar accesses to be fragile.
- int i = 0;
+
// Offset pointer for getting at the correct field in the ivar list when
// setting up the alias. These are: The base address for the global, the
// ivar array (second field), the ivar in this list (set for each ivar), and
@@ -1548,15 +1556,16 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::ConstantInt::get(IndexTy, 1), 0,
llvm::ConstantInt::get(IndexTy, 2) };
- for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
- endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
- +(*iter)->getNameAsString();
- offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+ + IVD->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i);
// Get the correct ivar field
llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
IvarList, offsetPointerIndexes, 4);
- // Get the existing alias, if one exists.
+ // Get the existing variable, if one exists.
llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
if (offset) {
offset->setInitializer(offsetValue);
@@ -1585,13 +1594,15 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
- ClassPtrAlias->setAliasee(
+ ClassPtrAlias->replaceAllUsesWith(
llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
ClassPtrAlias = 0;
}
if (MetaClassPtrAlias) {
- MetaClassPtrAlias->setAliasee(
+ MetaClassPtrAlias->replaceAllUsesWith(
llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
MetaClassPtrAlias = 0;
}
@@ -1818,8 +1829,6 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
Params.push_back(IntTy);
@@ -1833,8 +1842,6 @@ llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
llvm::Function *CGObjCGNU::GetPropertySetFunction() {
std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
Params.push_back(IntTy);
@@ -1848,9 +1855,31 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() {
"objc_setProperty"));
}
-// FIXME. Implement this.
-llvm::Function *CGObjCGNU::GetCopyStructFunction() {
- return 0;
+llvm::Function *CGObjCGNU::GetGetStructFunction() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrDiffTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // objc_getPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getPropertyStruct"));
+}
+llvm::Function *CGObjCGNU::GetSetStructFunction() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrDiffTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // objc_setPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setPropertyStruct"));
}
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
@@ -2008,7 +2037,7 @@ void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
}
@@ -2142,12 +2171,18 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// when linked against code which isn't (most of the time).
llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
if (!IvarOffsetPointer) {
- uint64_t Offset;
- if (ObjCImplementationDecl *OID =
- CGM.getContext().getObjCImplementation(
+ // This will cause a run-time crash if we accidentally use it. A value of
+ // 0 would seem more sensible, but will silently overwrite the isa pointer
+ // causing a great deal of confusion.
+ uint64_t Offset = -1;
+ // We can't call ComputeIvarBaseOffset() here if we have the
+ // implementation, because it will create an invalid ASTRecordLayout object
+ // that we are then stuck with forever, so we only initialize the ivar
+ // offset variable with a guess if we only have the interface. The
+ // initializer will be reset later anyway, when we are generating the class
+ // description.
+ if (!CGM.getContext().getObjCImplementation(
const_cast<ObjCInterfaceDecl *>(ID)))
- Offset = ComputeIvarBaseOffset(CGM, OID, Ivar);
- else
Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
llvm::ConstantInt *OffsetGuess =
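
Read off the Params vectors above, the two helpers the GNU runtime now registers have this C-level shape. This is a sketch inferred from the diff only; the parameter names, their order, and the BOOL typedef are guesses, not taken from any libobjc header:

#include <stddef.h>

typedef signed char BOOL; // assumption: the usual ObjC BOOL

extern "C" void objc_getPropertyStruct(void *dest, void *src, ptrdiff_t size,
                                       BOOL atomic, BOOL hasStrong);
extern "C" void objc_setPropertyStruct(void *dest, void *src, ptrdiff_t size,
                                       BOOL atomic, BOOL hasStrong);

Both return void and take (void*, void*, ptrdiff_t, BOOL, BOOL), which is why GetGetStructFunction and GetSetStructFunction build identical type lists and differ only in the symbol name they bind.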
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 54d0ff21ea44..7c679b90590c 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -16,7 +16,8 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -77,6 +78,7 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
++Index;
}
assert(Index != Ivars.size() && "Ivar is not inside container!");
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
return RL->getFieldOffset(Index);
}
@@ -129,7 +131,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
// a synthesized ivar can never be a bit-field, so this is safe.
const ASTRecordLayout &RL =
CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
- uint64_t TypeSizeInBits = RL.getSize();
+ uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
uint64_t BitOffset = FieldBitOffset % 8;
uint64_t ContainingTypeAlign = 8;
@@ -462,7 +464,7 @@ public:
// void objc_exception_rethrow(void)
std::vector<const llvm::Type*> Args;
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
}
@@ -999,8 +1001,8 @@ public:
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &);
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
};
@@ -1156,7 +1158,8 @@ public:
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
- virtual llvm::Constant *GetCopyStructFunction();
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *GetSetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -1401,7 +1404,10 @@ public:
return ObjCTypes.getSetPropertyFn();
}
- virtual llvm::Constant *GetCopyStructFunction() {
+ virtual llvm::Constant *GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -1520,7 +1526,7 @@ llvm::Constant *CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
- CGM.GetAddrOfConstantNSString(SL));
+ CGM.GetAddrOfConstantString(SL));
}
/// Generates a message send where the super is the receiver. This is
@@ -1662,57 +1668,76 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
return Qualifiers::GCNone;
}
-llvm::Constant *CGObjCCommonMac::GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &DeclRefs) {
- llvm::Constant *NullPtr =
+llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if ((CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ||
- DeclRefs.empty())
- return NullPtr;
+
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return nullPtr;
+
bool hasUnion = false;
SkipIvars.clear();
IvarsInfo.clear();
unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
- for (size_t i = 0; i < DeclRefs.size(); ++i) {
- const BlockDeclRefExpr *BDRE = DeclRefs[i];
- const ValueDecl *VD = BDRE->getDecl();
- CharUnits Offset = CGF.BlockDecls[VD];
- uint64_t FieldOffset = Offset.getQuantity();
- QualType Ty = VD->getType();
- assert(!Ty->isArrayType() &&
- "Array block variable should have been caught");
- if ((Ty->isRecordType() || Ty->isUnionType()) && !BDRE->isByRef()) {
- BuildAggrIvarRecordLayout(Ty->getAs<RecordType>(),
- FieldOffset,
- true,
- hasUnion);
+ // __isa is the first field in the block descriptor and, by the runtime's
+ // convention, must be assumed to be GC'able.
+ IvarsInfo.push_back(GC_IVAR(0, 1));
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
+
+ // __block variables are passed by their descriptor address.
+ if (ci->isByRef()) {
+ IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
+ continue;
+ }
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion);
continue;
}
- Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), Ty);
- unsigned FieldSize = CGM.getContext().getTypeSize(Ty);
- // __block variables are passed by their descriptior address. So, size
- // must reflect this.
- if (BDRE->isByRef())
- FieldSize = WordSizeInBits;
- if (GCAttr == Qualifiers::Strong || BDRE->isByRef())
- IvarsInfo.push_back(GC_IVAR(FieldOffset,
- FieldSize / WordSizeInBits));
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
+ unsigned fieldSize = CGM.getContext().getTypeSize(type);
+
+ if (GCAttr == Qualifiers::Strong)
+ IvarsInfo.push_back(GC_IVAR(fieldOffset,
+ fieldSize / WordSizeInBits));
else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
- SkipIvars.push_back(GC_IVAR(FieldOffset,
- FieldSize / ByteSizeInBits));
+ SkipIvars.push_back(GC_IVAR(fieldOffset,
+ fieldSize / ByteSizeInBits));
}
if (IvarsInfo.empty())
- return NullPtr;
- // Sort on byte position in case we encounterred a union nested in
- // block variable type's aggregate type.
- if (hasUnion && !IvarsInfo.empty())
- std::sort(IvarsInfo.begin(), IvarsInfo.end());
- if (hasUnion && !SkipIvars.empty())
- std::sort(SkipIvars.begin(), SkipIvars.end());
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
+ llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end());
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
@@ -2189,10 +2214,10 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getNumIvarInitializers())
Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
- CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
// FIXME: Set CXX-structors flag.
- if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
@@ -2277,7 +2302,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
unsigned Flags = eClassFlags_Meta;
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
- if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
std::vector<llvm::Constant*> Values(12);
@@ -2578,7 +2603,10 @@ llvm::Constant *CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
-llvm::Constant *CGObjCMac::GetCopyStructFunction() {
+llvm::Constant *CGObjCMac::GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+llvm::Constant *CGObjCMac::GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -2968,6 +2996,10 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
+ // A slot containing the exception to rethrow. Only needed when we
+ // have both a @catch and a @finally.
+ llvm::Value *PropagatingExnVar = 0;
+
// Push a normal cleanup to leave the try scope.
CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
SyncArgSlot,
@@ -3039,6 +3071,12 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::BasicBlock *CatchBlock = 0;
llvm::BasicBlock *CatchHandler = 0;
if (HasFinally) {
+ // Save the currently-propagating exception before
+ // objc_exception_try_enter clears the exception slot.
+ PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ "propagating_exception");
+ CGF.Builder.CreateStore(Caught, PropagatingExnVar);
+
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
@@ -3090,7 +3128,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
if (CatchParam) {
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
// These types work out because ConvertType(id) == i8*.
@@ -3134,7 +3172,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// the end of the catch body.
CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
// Initialize the catch variable.
@@ -3173,6 +3211,15 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// the try's write hazard and here.
//Hazards.emitWriteHazard();
+ // Extract the new exception and save it to the
+ // propagating-exception slot.
+ assert(PropagatingExnVar);
+ llvm::CallInst *NewCaught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ NewCaught->setDoesNotThrow();
+ CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
+
// Don't pop the catch handler; the throw already did.
CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
@@ -3193,13 +3240,21 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
CGF.EmitBlock(FinallyRethrow.getBlock(), true);
if (CGF.HaveInsertPoint()) {
- // Just look in the buffer for the exception to throw.
- llvm::CallInst *Caught =
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData);
- Caught->setDoesNotThrow();
+ // If we have a propagating-exception variable, check it.
+ llvm::Value *PropagatingExn;
+ if (PropagatingExnVar) {
+ PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
+
+ // Otherwise, just look in the buffer for the exception to throw.
+ } else {
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+ PropagatingExn = Caught;
+ }
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), Caught)
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), PropagatingExn)
->setDoesNotThrow();
CGF.Builder.CreateUnreachable();
}
@@ -3586,10 +3641,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
uint64_t MaxSkippedUnionIvarSize = 0;
FieldDecl *MaxField = 0;
FieldDecl *MaxSkippedField = 0;
- FieldDecl *LastFieldBitfield = 0;
+ FieldDecl *LastFieldBitfieldOrUnnamed = 0;
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
- uint64_t LastBitfieldOffset = 0;
+ uint64_t LastBitfieldOrUnnamedOffset = 0;
if (RecFields.empty())
return;
@@ -3609,12 +3664,12 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
// Skip over unnamed or bitfields
if (!Field->getIdentifier() || Field->isBitField()) {
- LastFieldBitfield = Field;
- LastBitfieldOffset = FieldOffset;
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
continue;
}
- LastFieldBitfield = 0;
+ LastFieldBitfieldOrUnnamed = 0;
QualType FQT = Field->getType();
if (FQT->isRecordType() || FQT->isUnionType()) {
if (FQT->isUnionType())
@@ -3641,7 +3696,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
assert(!FQT->isUnionType() &&
"layout for array of unions not supported");
- if (FQT->isRecordType()) {
+ if (FQT->isRecordType() && ElCount) {
int OldIndex = IvarsInfo.size() - 1;
int OldSkIndex = SkipIvars.size() -1;
@@ -3703,16 +3758,25 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
}
}
- if (LastFieldBitfield) {
- // Last field was a bitfield. Must update skip info.
- Expr *BitWidth = LastFieldBitfield->getBitWidth();
- uint64_t BitFieldSize =
- BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
- GC_IVAR skivar;
- skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
- skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
- + ((BitFieldSize % ByteSizeInBits) != 0);
- SkipIvars.push_back(skivar);
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfieldOrUnnamed->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() && "Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ unsigned FieldSize
+ = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType());
+ SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize / ByteSizeInBits));
+ }
}
if (MaxField)
@@ -4886,7 +4950,7 @@ void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
CGM.getContext().getASTObjCImplementationLayout(OID);
// InstanceSize is really instance end.
- InstanceSize = llvm::RoundUpToAlignment(RL.getDataSize(), 8) / 8;
+ InstanceSize = RL.getDataSize().getQuantity();
// If there are no fields, the start is the same as the end.
if (!RL.getFieldCount())
@@ -4927,7 +4991,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::GlobalVariable *SuperClassGV, *IsAGV;
bool classIsHidden =
- CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
if (ID->getNumIvarInitializers())
@@ -5222,7 +5286,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
// well (i.e., in ObjCIvarOffsetVariable).
if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
Ivar->getAccessControl() == ObjCIvarDecl::Package ||
- CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ ID->getVisibility() == HiddenVisibility)
IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
else
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
@@ -6096,7 +6160,7 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
}
@@ -6124,32 +6188,20 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
/// EmitThrowStmt - Generate code for a throw statement.
void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
- llvm::Value *Exception;
- llvm::Constant *FunctionThrowOrRethrow;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
- Exception = CGF.EmitScalarExpr(ThrowExpr);
- FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn();
- } else {
- assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
- "Unexpected rethrow outside @catch block.");
- Exception = CGF.ObjCEHValueStack.back();
- FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn();
- }
-
- llvm::Value *ExceptionAsObject =
- CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
- llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
- if (InvokeDest) {
- CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
- CGF.getUnreachableBlock(), InvokeDest,
- &ExceptionAsObject, &ExceptionAsObject + 1);
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy,
+ "tmp");
+ llvm::Value *Args[] = { Exception };
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(),
+ Args, Args+1)
+ .setDoesNotReturn();
} else {
- CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
- ->setDoesNotReturn();
- CGF.Builder.CreateUnreachable();
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn(), 0, 0)
+ .setDoesNotReturn();
}
- // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
@@ -6208,7 +6260,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
ID->getIdentifier()->getName()));
}
- if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ if (CGM.getLangOptions().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
ObjCTypes.EHTypeTy));
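
The PropagatingExnVar change is easiest to see in a toy model of the fragile, setjmp-based ABI: entering the nested try scope for the @catch body reuses the thread's exception machinery, so the exception currently being propagated must be parked in a local slot before that point and rethrown from the slot on the @finally path. A minimal, self-contained sketch with hypothetical types, not the runtime's real API:

#include <csetjmp>
#include <cstdio>

struct TryScope { std::jmp_buf buf; const char *exception; };

int main() {
  TryScope outer;
  outer.exception = "boom";                   // what the @try body will throw
  if (setjmp(outer.buf) == 0) {
    std::longjmp(outer.buf, 1);               // the @try body "throws"
  } else {
    // Save the in-flight exception *before* entering the @catch's own
    // protected scope; this models the store to PropagatingExnVar.
    const char *propagatingExn = outer.exception;
    TryScope inner;
    inner.exception = 0;
    if (setjmp(inner.buf) == 0) {
      // @catch body; anything it throws lands in 'inner' and may reuse
      // the shared exception state, but the saved copy is untouched.
    }
    std::printf("finally rethrows: %s\n", propagatingExn); // FinallyRethrow
  }
  return 0;
}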
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 584760f6f343..5ad3a50adc3f 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -56,6 +56,7 @@ namespace CodeGen {
namespace CodeGen {
class CodeGenModule;
+ class CGBlockInfo;
// FIXME: Several methods should be pure virtual but aren't, to avoid
// breaking partially-implemented subclasses.
@@ -176,8 +177,10 @@ public:
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
- // API for atomic copying of qualified aggregates in setter/getter.
- virtual llvm::Constant *GetCopyStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in getter.
+ virtual llvm::Constant *GetGetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter.
+ virtual llvm::Constant *GetSetStructFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
@@ -219,9 +222,8 @@ public:
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
llvm::Value *Size) = 0;
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) = 0;
-
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
};
/// Creates an instance of an Objective-C runtime class.
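
What the getter/setter split buys at the use site: a synthesized atomic struct-valued property copies its aggregate through the runtime helper instead of doing a non-atomic memberwise copy. A hypothetical sketch of the lowering; the Point3D type, function names, and ivar addressing are all illustrative:

#include <stddef.h>

typedef signed char BOOL;
extern "C" void objc_getPropertyStruct(void *, void *, ptrdiff_t, BOOL, BOOL);
extern "C" void objc_setPropertyStruct(void *, void *, ptrdiff_t, BOOL, BOOL);

struct Point3D { double x, y, z; };

// Roughly what a synthesized atomic getter/setter pair for a Point3D
// property lowers to (sketch only):
Point3D getPoint(char *self, ptrdiff_t ivarOffset) {
  Point3D tmp;
  objc_getPropertyStruct(&tmp, self + ivarOffset, sizeof(Point3D),
                         /*atomic=*/1, /*hasStrong=*/0);
  return tmp;
}

void setPoint(char *self, ptrdiff_t ivarOffset, Point3D value) {
  objc_setPropertyStruct(self + ivarOffset, &value, sizeof(Point3D),
                         /*atomic=*/1, /*hasStrong=*/0);
}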
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 60df613f65e8..7ec0ee4b5cab 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -30,6 +30,10 @@ class RTTIBuilder {
/// Fields - The fields of the RTTI descriptor currently being built.
llvm::SmallVector<llvm::Constant *, 16> Fields;
+ /// GetAddrOfTypeName - Returns the mangled type name of the given type.
+ llvm::GlobalVariable *
+ GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
+
/// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
/// descriptor of the given type.
llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
@@ -59,64 +63,10 @@ class RTTIBuilder {
void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
public:
- RTTIBuilder(CodeGenModule &cgm)
- : CGM(cgm), VMContext(cgm.getModule().getContext()),
- Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
-
- llvm::Constant *BuildName(QualType Ty, bool Hidden,
- llvm::GlobalVariable::LinkageTypes Linkage) {
- llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, OutName);
- llvm::StringRef Name = OutName.str();
-
- llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
- if (OGV && !OGV->isDeclaration())
- return llvm::ConstantExpr::getBitCast(OGV, Int8PtrTy);
-
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Name.substr(4));
-
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, Linkage,
- C, Name);
- if (OGV) {
- GV->takeName(OGV);
- llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV,
- OGV->getType());
- OGV->replaceAllUsesWith(NewPtr);
- OGV->eraseFromParent();
- }
- if (Hidden)
- GV->setVisibility(llvm::GlobalVariable::HiddenVisibility);
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
- }
+ RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
+ VMContext(CGM.getModule().getContext()),
+ Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
- // FIXME: unify with DecideExtern
- bool DecideHidden(QualType Ty) {
- // For this type, see if all components are never hidden.
- if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
- return (DecideHidden(MPT->getPointeeType())
- && DecideHidden(QualType(MPT->getClass(), 0)));
- if (const PointerType *PT = Ty->getAs<PointerType>())
- return DecideHidden(PT->getPointeeType());
- if (const FunctionType *FT = Ty->getAs<FunctionType>()) {
- if (DecideHidden(FT->getResultType()) == false)
- return false;
- if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()) {
- for (unsigned i = 0; i <FPT->getNumArgs(); ++i)
- if (DecideHidden(FPT->getArgType(i)) == false)
- return false;
- for (unsigned i = 0; i <FPT->getNumExceptions(); ++i)
- if (DecideHidden(FPT->getExceptionType(i)) == false)
- return false;
- return true;
- }
- }
- if (const RecordType *RT = Ty->getAs<RecordType>())
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return CGM.getDeclVisibilityMode(RD) == LangOptions::Hidden;
- return false;
- }
-
// Pointer type info flags.
enum {
/// PTI_Const - Type has const qualifier.
@@ -162,10 +112,34 @@ public:
};
}
+llvm::GlobalVariable *
+RTTIBuilder::GetAddrOfTypeName(QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
+ llvm::Constant *Init = llvm::ConstantArray::get(VMContext, Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+
+ GV->setInitializer(Init);
+
+ return GV;
+}
+
llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
// Look for an existing global.
@@ -187,15 +161,17 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
// Basic type information (e.g. for "int", "bool", etc.) will be kept in
// the run-time support library. Specifically, the run-time support
// library should contain type_info objects for the types X, X* and
- // X const*, for every X in: void, bool, wchar_t, char, unsigned char,
- // signed char, short, unsigned short, int, unsigned int, long,
- // unsigned long, long long, unsigned long long, float, double, long double,
- // char16_t, char32_t, and the IEEE 754r decimal and half-precision
- // floating point types.
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
switch (Ty->getKind()) {
case BuiltinType::Void:
+ case BuiltinType::NullPtr:
case BuiltinType::Bool:
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Char_U:
case BuiltinType::Char_S:
case BuiltinType::UChar:
@@ -219,12 +195,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::UndeducedAuto:
assert(false && "Should not see this type here!");
- case BuiltinType::NullPtr:
- assert(false && "FIXME: nullptr_t is not handled!");
-
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
@@ -270,8 +242,9 @@ static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
/// the given type exists somewhere else, and that we should not emit the type
/// information in this translation unit. Assumes that it is not a
/// standard-library type.
-static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
- QualType Ty) {
+static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
+ ASTContext &Context = CGM.getContext();
+
// If RTTI is disabled, don't consider key functions.
if (!Context.getLangOptions().RTTI) return false;
@@ -283,13 +256,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
if (!RD->isDynamicClass())
return false;
- // Get the key function.
- const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- if (KeyFunction && !KeyFunction->hasBody()) {
- // The class has a key function, but it is not defined in this translation
- // unit, so we should use the external descriptor for it.
- return true;
- }
+ return !CGM.getVTables().ShouldEmitVTableInThisTU(RD);
}
return false;
@@ -335,7 +302,8 @@ static bool ContainsIncompleteClassType(QualType Ty) {
/// getTypeInfoLinkage - Return the linkage that the type info and type info
/// name constants should have for the given type.
-static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
+static llvm::GlobalVariable::LinkageTypes
+getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
// Itanium C++ ABI 2.9.5p7:
// In addition, it and all of the intermediate abi::__pointer_type_info
// structs in the chain down to the abi::__class_type_info for the
@@ -355,16 +323,22 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
return llvm::GlobalValue::InternalLinkage;
case ExternalLinkage:
+ if (!CGM.getLangOptions().RTTI) {
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
if (RD->isDynamicClass())
- return CodeGenModule::getVTableLinkage(RD);
+ return CGM.getVTableLinkage(RD);
}
- return llvm::GlobalValue::WeakODRLinkage;
+ return llvm::GlobalValue::LinkOnceODRLinkage;
}
- return llvm::GlobalValue::WeakODRLinkage;
+ return llvm::GlobalValue::LinkOnceODRLinkage;
}
// CanUseSingleInheritance - Return whether the given record decl has a "single,
@@ -513,23 +487,81 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
Fields.push_back(VTable);
}
+// maybeUpdateRTTILinkage - Will update the linkage of the RTTI data structures
+// from available_externally to the correct linkage if necessary. An example of
+// this is:
+//
+// struct A {
+// virtual void f();
+// };
+//
+// const std::type_info &g() {
+// return typeid(A);
+// }
+//
+// void A::f() { }
+//
+// When we're generating the typeid(A) expression, we do not yet know that
+// A's key function is defined in this translation unit, so we will give the
+// typeinfo and typename structures available_externally linkage. When A::f
+// forces the vtable to be generated, we need to change the linkage of the
+// typeinfo and typename structs, otherwise we'll end up with undefined
+// externals when linking.
+static void
+maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
+ QualType Ty) {
+ // We're only interested in globals with available_externally linkage.
+ if (!GV->hasAvailableExternallyLinkage())
+ return;
+
+ // Get the real linkage for the type.
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // If variable is supposed to have available_externally linkage, we don't
+ // need to do anything.
+ if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
+ return;
+
+ // Update the typeinfo linkage.
+ GV->setLinkage(Linkage);
+
+ // Get the typename global.
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
+
+ assert(TypeNameGV->hasAvailableExternallyLinkage() &&
+ "Type name has different linkage from type info!");
+
+ // And update its linkage.
+ TypeNameGV->setLinkage(Linkage);
+}
+
llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
-
+
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
- if (OldGV && !OldGV->isDeclaration())
+ if (OldGV && !OldGV->isDeclaration()) {
+ maybeUpdateRTTILinkage(CGM, OldGV, Ty);
+
return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+ }
// Check if there is already an external RTTI descriptor for this type.
bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
- if (!Force &&
- (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty)))
+ if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
return GetAddrOfExternalRTTIDescriptor(Ty);
// Emit the standard library with external linkage.
@@ -537,13 +569,16 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
if (IsStdLib)
Linkage = llvm::GlobalValue::ExternalLinkage;
else
- Linkage = getTypeInfoLinkage(Ty);
+ Linkage = getTypeInfoLinkage(CGM, Ty);
// Add the vtable pointer.
BuildVTablePointer(cast<Type>(Ty));
// And the name.
- Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
+ llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -641,13 +676,28 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// type_infos themselves, so we can emit these as hidden symbols.
// But don't do this if we're worried about strict visibility
// compatibility.
- if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- CGM.setTypeVisibility(GV, cast<CXXRecordDecl>(RT->getDecl()),
- /*ForRTTI*/ true);
- else if (CGM.getCodeGenOpts().HiddenWeakVTables &&
- Linkage == llvm::GlobalValue::WeakODRLinkage)
- GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForRTTI);
+ CGM.setTypeVisibility(TypeName, RD, CodeGenModule::TVK_ForRTTIName);
+ } else {
+ Visibility TypeInfoVisibility = DefaultVisibility;
+ if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ TypeInfoVisibility = HiddenVisibility;
+
+ // The type name should have the same visibility as the type itself.
+ Visibility ExplicitVisibility = Ty->getVisibility();
+ TypeName->setVisibility(CodeGenModule::
+ GetLLVMVisibility(ExplicitVisibility));
+
+ TypeInfoVisibility = minVisibility(TypeInfoVisibility, Ty->getVisibility());
+ GV->setVisibility(CodeGenModule::GetLLVMVisibility(TypeInfoVisibility));
+ }
+
+ GV->setUnnamedAddr(true);
+
return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
}
@@ -701,12 +751,14 @@ void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
Fields.push_back(BaseTypeInfo);
}
-/// SeenBases - Contains virtual and non-virtual bases seen when traversing
-/// a class hierarchy.
-struct SeenBases {
- llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
- llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
-};
+namespace {
+ /// SeenBases - Contains virtual and non-virtual bases seen when traversing
+ /// a class hierarchy.
+ struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+ };
+}
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
@@ -827,7 +879,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
+ OffsetFlags = Layout.getBaseClassOffsetInBits(BaseDecl) / 8;
};
OffsetFlags <<= 8;
@@ -938,16 +990,16 @@ void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
}
void CodeGenModule::EmitFundamentalRTTIDescriptors() {
- QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
- Context.Char16Ty, Context.UnsignedLongLongTy,
- Context.LongLongTy, Context.WCharTy,
- Context.UnsignedShortTy, Context.ShortTy,
- Context.UnsignedLongTy, Context.LongTy,
- Context.UnsignedIntTy, Context.IntTy,
- Context.UnsignedCharTy, Context.FloatTy,
- Context.LongDoubleTy, Context.DoubleTy,
- Context.CharTy, Context.BoolTy,
- Context.SignedCharTy };
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.NullPtrTy,
+ Context.BoolTy, Context.WCharTy,
+ Context.CharTy, Context.UnsignedCharTy,
+ Context.SignedCharTy, Context.ShortTy,
+ Context.UnsignedShortTy, Context.IntTy,
+ Context.UnsignedIntTy, Context.LongTy,
+ Context.UnsignedLongTy, Context.LongLongTy,
+ Context.UnsignedLongLongTy, Context.FloatTy,
+ Context.DoubleTy, Context.LongDoubleTy,
+ Context.Char16Ty, Context.Char32Ty };
for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
}
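
The substr(4) in GetAddrOfTypeName leans on the Itanium mangling scheme: the typeinfo-name symbol is "_ZTS" followed by the type's own mangling, so dropping the four-character prefix yields exactly the string the global's initializer must hold. A tiny sketch for a class A, whose mangling is "1A":

#include <cassert>
#include <cstring>

int main() {
  const char *typeNameSymbol = "_ZTS1A";          // name global's symbol for 'A'
  const char *storedString  = typeNameSymbol + 4; // the substr(4): "1A"
  assert(std::strcmp(storedString, "1A") == 0);
  return 0;
}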
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 9b4e9f86c6da..30da05f421fe 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -11,10 +11,11 @@
#define CLANG_CODEGEN_CGRECORDLAYOUT_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/DerivedTypes.h"
#include "clang/AST/Decl.h"
namespace llvm {
class raw_ostream;
- class Type;
+ class StructType;
}
namespace clang {
@@ -172,8 +173,13 @@ class CGRecordLayout {
void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
private:
- /// The LLVMType corresponding to this record layout.
- const llvm::Type *LLVMType;
+ /// The LLVM type corresponding to this record layout; used when
+ /// laying it out as a complete object.
+ llvm::PATypeHolder CompleteObjectType;
+
+ /// The LLVM type for the non-virtual part of this record layout;
+ /// used when laying it out as a base subobject.
+ llvm::PATypeHolder BaseSubobjectType;
/// Map from (non-bit-field) struct field to the corresponding llvm struct
/// type field no. This info is populated by record builder.
@@ -185,19 +191,41 @@ private:
// FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
// map for both virtual and non virtual bases.
- llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBaseFields;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
- /// Whether one of the fields in this record layout is a pointer to data
- /// member, or a struct that contains pointer to data member.
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a complete object, requires a non-zero bitpattern
+ /// when zero-initialized.
bool IsZeroInitializable : 1;
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a base subobject, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializableAsBase : 1;
+
public:
- CGRecordLayout(const llvm::Type *T, bool IsZeroInitializable)
- : LLVMType(T), IsZeroInitializable(IsZeroInitializable) {}
+ CGRecordLayout(const llvm::StructType *CompleteObjectType,
+ const llvm::StructType *BaseSubobjectType,
+ bool IsZeroInitializable,
+ bool IsZeroInitializableAsBase)
+ : CompleteObjectType(CompleteObjectType),
+ BaseSubobjectType(BaseSubobjectType),
+ IsZeroInitializable(IsZeroInitializable),
+ IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+ /// \brief Return the "complete object" LLVM type associated with
+ /// this record.
+ const llvm::StructType *getLLVMType() const {
+ return cast<llvm::StructType>(CompleteObjectType.get());
+ }
- /// \brief Return the LLVM type associated with this record.
- const llvm::Type *getLLVMType() const {
- return LLVMType;
+ /// \brief Return the "base subobject" LLVM type associated with
+ /// this record.
+ const llvm::StructType *getBaseSubobjectLLVMType() const {
+ return cast<llvm::StructType>(BaseSubobjectType.get());
}
/// \brief Check whether this struct can be C++ zero-initialized
@@ -206,6 +234,12 @@ public:
return IsZeroInitializable;
}
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer when considered as a base subobject.
+ bool isZeroInitializableAsBase() const {
+ return IsZeroInitializableAsBase;
+ }
+
/// \brief Return llvm::StructType element number that corresponds to the
/// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
@@ -215,8 +249,15 @@ public:
}
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
- assert(NonVirtualBaseFields.count(RD) && "Invalid non-virtual base!");
- return NonVirtualBaseFields.lookup(RD);
+ assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBases.lookup(RD);
+ }
+
+ /// \brief Return the LLVM field index corresponding to the given
+ /// virtual base. Only valid when operating on the complete object.
+ unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
+ assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
+ return CompleteObjectVirtualBases.lookup(base);
}
/// \brief Return the BitFieldInfo that corresponds to the field FD.
@@ -224,7 +265,7 @@ public:
assert(FD->isBitField() && "Invalid call for non bit-field decl!");
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
it = BitFields.find(FD);
- assert(it != BitFields.end() && "Unable to find bitfield info");
+ assert(it != BitFields.end() && "Unable to find bitfield info");
return it->second;
}
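
One illustrative case of why the layout now carries separate complete-object and base-subobject zero-init flags, relying on the Itanium rule that a null pointer to data member is all-ones rather than all-zeros (the types below are hypothetical):

struct HasMemPtr { int HasMemPtr::*p; };       // null 'p' is -1, not 0
struct Derived : virtual HasMemPtr { int x; };
// As a complete object, Derived includes the virtual HasMemPtr base, so a
// zeroinitializer would produce a non-null 'p': not zero-initializable.
// As a base subobject, the virtual base lives elsewhere in the most-derived
// object, so the Derived fragment itself is zero-initializable.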
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 77a319fa3ab1..4b19aefcf9c4 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -14,6 +14,7 @@
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
@@ -27,28 +28,52 @@
using namespace clang;
using namespace CodeGen;
-namespace clang {
-namespace CodeGen {
+namespace {
class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
+ ///
std::vector<const llvm::Type *> FieldTypes;
- /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
- typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
- llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
-
- /// LLVMBitFieldInfo - Holds location and size information about a bit field.
- typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
- llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
-
- typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
- llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
+ /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
+ /// of the struct. For example, consider:
+ ///
+ /// struct A { int i; };
+ /// struct B { void *v; };
+ /// struct C : virtual A, B { };
+ ///
+ /// The LLVM type of C will be
+ /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
+ ///
+ /// And the LLVM type of the non-virtual base struct will be
+ /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
+ ///
+ /// This only gets initialized if the base subobject type is
+ /// different from the complete-object type.
+ const llvm::StructType *BaseSubobjectType;
+
+ /// Fields - Holds a field and its corresponding LLVM field number.
+ llvm::DenseMap<const FieldDecl *, unsigned> Fields;
+
+ /// BitFields - Holds location and size information about each bit field.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
+ /// avoid laying out virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
/// IsZeroInitializable - Whether this struct can be C++
/// zero-initialized with an LLVM zeroinitializer.
bool IsZeroInitializable;
+ bool IsZeroInitializableAsBase;
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
@@ -59,18 +84,14 @@ private:
/// Alignment - Contains the alignment of the RecordDecl.
//
// FIXME: This is not needed and should be removed.
- unsigned Alignment;
-
- /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
- /// LLVM types.
- unsigned AlignmentAsLLVMStruct;
+ CharUnits Alignment;
/// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
/// this will have the number of bits still available in the field.
char BitsAvailableInLastField;
- /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
- uint64_t NextFieldOffsetInBytes;
+ /// NextFieldOffset - Holds the next field offset.
+ CharUnits NextFieldOffset;
/// LayoutUnionField - Will lay out a field in a union and return the type
/// that the field will have.
@@ -84,14 +105,30 @@ private:
/// Returns false if the operation failed because the struct is not packed.
bool LayoutFields(const RecordDecl *D);
+ /// LayoutBase - Lay out a single base, virtual or non-virtual.
+ void LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBase - lay out a single virtual base.
+ void LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBases - lay out the virtual bases of a record decl.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
/// LayoutNonVirtualBase - lay out a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
- uint64_t BaseOffset);
+ void LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
- /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
/// LayoutNonVirtualBases - lay out the non-virtual bases of a record decl.
void LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
+ /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
+ bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
+
/// LayoutField - lay out a single field. Returns false if the operation failed
/// because the current struct is not packed.
bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
@@ -100,41 +137,47 @@ private:
void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
/// AppendField - Appends a field with the given offset and type.
- void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+ void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
- void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+ void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
+ /// getByteArrayType - Returns a byte array type with the given number of
+ /// elements.
+ const llvm::Type *getByteArrayType(CharUnits NumBytes);
+
/// AppendBytes - Append a given number of bytes to the record.
- void AppendBytes(uint64_t NumBytes);
+ void AppendBytes(CharUnits numBytes);
/// AppendTailPadding - Append enough tail padding so that the type will have
/// the passed size.
void AppendTailPadding(uint64_t RecordSize);
- unsigned getTypeAlignment(const llvm::Type *Ty) const;
+ CharUnits getTypeAlignment(const llvm::Type *Ty) const;
+
+ /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
+ /// LLVM element types.
+ CharUnits getAlignmentAsLLVMStruct() const;
/// CheckZeroInitializable - Check if the given type contains a pointer
/// to data member.
void CheckZeroInitializable(QualType T);
- void CheckZeroInitializable(const CXXRecordDecl *RD);
public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
- : IsZeroInitializable(true), Packed(false), Types(Types),
- Alignment(0), AlignmentAsLLVMStruct(1),
- BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+ : BaseSubobjectType(0),
+ IsZeroInitializable(true), IsZeroInitializableAsBase(true),
+ Packed(false), Types(Types), BitsAvailableInLastField(0) { }
/// Layout - Will layout a RecordDecl.
void Layout(const RecordDecl *D);
};
}
-}
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
- Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
Packed = D->hasAttr<PackedAttr>();
if (D->isUnion()) {
@@ -147,12 +190,12 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
// We weren't able to layout the struct. Try again with a packed struct
Packed = true;
- AlignmentAsLLVMStruct = 1;
- NextFieldOffsetInBytes = 0;
+ NextFieldOffset = CharUnits::Zero();
FieldTypes.clear();
- LLVMFields.clear();
- LLVMBitFields.clear();
- LLVMNonVirtualBases.clear();
+ Fields.clear();
+ BitFields.clear();
+ NonVirtualBases.clear();
+ VirtualBases.clear();
LayoutFields(D);
}
@@ -182,6 +225,12 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
FieldSize = TypeSizeInBits;
}
+ // On big-endian machines the first fields are in higher bit positions,
+ // so reverse the offset here; the byte offsets are reversed back later.
+ if (Types.getTargetData().isBigEndian()) {
+ FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
+ }
+
// Compute the access components. The policy we use is to start by attempting
// to access using the width of the bit-field type itself and to always access
// at aligned indices of that type. If such an access would fail because it
@@ -189,7 +238,6 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// power of two and retry. The current algorithm assumes pow2 sized types,
// although this is easy to fix.
//
- // FIXME: This algorithm is wrong on big-endian systems, I think.
assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
CGBitFieldInfo::AccessInfo Components[3];
unsigned NumComponents = 0;
@@ -237,7 +285,15 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// FIXME: We still follow the old access pattern of only using the field
// byte offset. We should switch this once we fix the struct layout to be
// pretty.
- AI.FieldByteOffset = AccessStart / 8;
+
+ // On big-endian machines we reversed the bit offset above because the first
+ // fields are in higher bits. That also reverses the byte order, so compensate
+ // here by reversing the byte offset on big-endian machines.
+ if (Types.getTargetData().isBigEndian()) {
+ AI.FieldByteOffset = (ContainingTypeSizeInBits - AccessStart - AccessWidth) / 8;
+ } else {
+ AI.FieldByteOffset = AccessStart / 8;
+ }
AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
AI.AccessWidth = AccessWidth;
AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
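To see why the reversal is needed, consider the following (bit allocation is ABI-specific; this assumes the common MSB-first convention on big-endian targets):

    struct S { unsigned a : 3; unsigned b : 5; };
    // little-endian: 'a' occupies the low bits (0-2) of its byte;
    // big-endian:    'a' occupies the high bits (5-7).
    // Reflecting FieldOffset against the container size converts one
    // numbering into the other; the byte offset is reflected back above.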
@@ -258,56 +314,54 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize) {
const RecordDecl *RD = FD->getParent();
const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
- uint64_t ContainingTypeSizeInBits = RL.getSize();
- unsigned ContainingTypeAlign = RL.getAlignment();
+ uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
+ unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
ContainingTypeAlign);
}
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
- uint64_t FieldOffset) {
- uint64_t FieldSize =
+ uint64_t fieldOffset) {
+ uint64_t fieldSize =
D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
- if (FieldSize == 0)
+ if (fieldSize == 0)
return;
- uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
- unsigned NumBytesToAppend;
+ uint64_t nextFieldOffsetInBits = NextFieldOffset.getQuantity() * 8;
+ unsigned numBytesToAppend;
- if (FieldOffset < NextFieldOffset) {
+ if (fieldOffset < nextFieldOffsetInBits) {
assert(BitsAvailableInLastField && "Bitfield size mismatch!");
- assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
+ assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
// The bitfield begins in the previous bit-field.
- NumBytesToAppend =
- llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
+ numBytesToAppend =
+ llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField, 8) / 8;
} else {
- assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
+ assert(fieldOffset % 8 == 0 && "Field offset not aligned correctly");
// Append padding if necessary.
- AppendBytes((FieldOffset - NextFieldOffset) / 8);
+ AppendPadding(CharUnits::fromQuantity(fieldOffset / 8), CharUnits::One());
- NumBytesToAppend =
- llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+ numBytesToAppend = llvm::RoundUpToAlignment(fieldSize, 8) / 8;
- assert(NumBytesToAppend && "No bytes to append!");
+ assert(numBytesToAppend && "No bytes to append!");
}
// Add the bit field info.
- LLVMBitFields.push_back(
- LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
- FieldSize)));
+ BitFields.insert(std::make_pair(D,
+ CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
- AppendBytes(NumBytesToAppend);
+ AppendBytes(CharUnits::fromQuantity(numBytesToAppend));
BitsAvailableInLastField =
- NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
+ NextFieldOffset.getQuantity() * 8 - (fieldOffset + fieldSize);
}
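A worked example of the arithmetic above, assuming 8-bit bytes:

    struct S { unsigned a : 12; unsigned b : 12; };
    // 'a': fieldOffset 0; appends RoundUp(12, 8) / 8 = 2 bytes, leaving
    //      BitsAvailableInLastField = 16 - 12 = 4.
    // 'b': fieldOffset 12 < 16, so it begins in the previous storage and
    //      appends only RoundUp(12 - 4, 8) / 8 = 1 further byte.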
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
- uint64_t FieldOffset) {
+ uint64_t fieldOffset) {
// If the field is packed, then we need a packed struct.
if (!Packed && D->hasAttr<PackedAttr>())
return false;
@@ -318,54 +372,50 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (!Packed && !D->getDeclName())
return false;
- LayoutBitField(D, FieldOffset);
+ LayoutBitField(D, fieldOffset);
return true;
}
CheckZeroInitializable(D->getType());
- assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
- uint64_t FieldOffsetInBytes = FieldOffset / 8;
+ assert(fieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
+ CharUnits fieldOffsetInBytes = CharUnits::fromQuantity(fieldOffset / 8);
const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
- unsigned TypeAlignment = getTypeAlignment(Ty);
+ CharUnits typeAlignment = getTypeAlignment(Ty);
// If the type alignment is larger then the struct alignment, we must use
// a packed struct.
- if (TypeAlignment > Alignment) {
+ if (typeAlignment > Alignment) {
assert(!Packed && "Alignment is wrong even with packed struct!");
return false;
}
- if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
- const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
- if (const MaxFieldAlignmentAttr *MFAA =
- RD->getAttr<MaxFieldAlignmentAttr>()) {
- if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
- return false;
+ if (!Packed) {
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != typeAlignment.getQuantity() * 8)
+ return false;
+ }
}
}
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
+ CharUnits alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
- if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
assert(!Packed && "Could not place field even with packed struct!");
return false;
}
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
- // Even with alignment, the field offset is not at the right place,
- // insert padding.
- uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
-
- AppendBytes(PaddingInBytes);
- }
+ AppendPadding(fieldOffsetInBytes, typeAlignment);
// Now append the field.
- LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
- AppendField(FieldOffsetInBytes, Ty);
+ Fields[D] = FieldTypes.size();
+ AppendField(fieldOffsetInBytes, Ty);
return true;
}
@@ -389,95 +439,167 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
// Add the bit field info.
- LLVMBitFields.push_back(
- LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
- 0, FieldSize)));
+ BitFields.insert(std::make_pair(Field,
+ CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
return FieldTy;
}
// This is a regular union field.
- LLVMFields.push_back(LLVMFieldInfo(Field, 0));
+ Fields[Field] = 0;
return Types.ConvertTypeForMemRecursive(Field->getType());
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
- const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+ const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
- const llvm::Type *Ty = 0;
- uint64_t Size = 0;
- unsigned Align = 0;
+ const llvm::Type *unionType = 0;
+ CharUnits unionSize = CharUnits::Zero();
+ CharUnits unionAlign = CharUnits::Zero();
- bool HasOnlyZeroSizedBitFields = true;
+ bool hasOnlyZeroSizedBitFields = true;
- unsigned FieldNo = 0;
- for (RecordDecl::field_iterator Field = D->field_begin(),
- FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
- assert(Layout.getFieldOffset(FieldNo) == 0 &&
+ unsigned fieldNo = 0;
+ for (RecordDecl::field_iterator field = D->field_begin(),
+ fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
+ assert(layout.getFieldOffset(fieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
- const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);
+ const llvm::Type *fieldType = LayoutUnionField(*field, layout);
- if (!FieldTy)
+ if (!fieldType)
continue;
- HasOnlyZeroSizedBitFields = false;
+ hasOnlyZeroSizedBitFields = false;
- unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
- uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
+ CharUnits fieldAlign = CharUnits::fromQuantity(
+ Types.getTargetData().getABITypeAlignment(fieldType));
+ CharUnits fieldSize = CharUnits::fromQuantity(
+ Types.getTargetData().getTypeAllocSize(fieldType));
- if (FieldAlign < Align)
+ if (fieldAlign < unionAlign)
continue;
- if (FieldAlign > Align || FieldSize > Size) {
- Ty = FieldTy;
- Align = FieldAlign;
- Size = FieldSize;
+ if (fieldAlign > unionAlign || fieldSize > unionSize) {
+ unionType = fieldType;
+ unionAlign = fieldAlign;
+ unionSize = fieldSize;
}
}
// Now add our field.
- if (Ty) {
- AppendField(0, Ty);
+ if (unionType) {
+ AppendField(CharUnits::Zero(), unionType);
- if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
+ if (getTypeAlignment(unionType) > layout.getAlignment()) {
// We need a packed struct.
Packed = true;
- Align = 1;
+ unionAlign = CharUnits::One();
}
}
- if (!Align) {
- assert(HasOnlyZeroSizedBitFields &&
+ if (unionAlign.isZero()) {
+ assert(hasOnlyZeroSizedBitFields &&
"0-align record did not have all zero-sized bit-fields!");
- Align = 1;
+ unionAlign = CharUnits::One();
}
// Append tail padding.
- if (Layout.getSize() / 8 > Size)
- AppendPadding(Layout.getSize() / 8, Align);
+ CharUnits recordSize = layout.getSize();
+ if (recordSize > unionSize)
+ AppendPadding(recordSize, unionAlign);
+}
+
+void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset) {
+ AppendPadding(baseOffset, CharUnits::One());
+
+ const ASTRecordLayout &baseASTLayout
+ = Types.getContext().getASTRecordLayout(base);
+
+ // Fields and bases can be laid out in the tail padding of previous
+ // bases. If this happens, we need to allocate the base as an i8
+ // array; otherwise, we can use the subobject type. However,
+ // actually doing that would require knowledge of what immediately
+ // follows this base in the layout, so instead we do a conservative
+ // approximation, which is to use the base subobject type if it
+ // has the same LLVM storage size as the nvsize.
+
+ // The nvsize, i.e. the unpadded size of the base class.
+ CharUnits nvsize = baseASTLayout.getNonVirtualSize();
+
+#if 0
+ const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ const llvm::StructLayout *baseLLVMLayout =
+ Types.getTargetData().getStructLayout(subobjectType);
+ CharUnits stsize = CharUnits::fromQuantity(baseLLVMLayout->getSizeInBytes());
+
+ if (nvsize == stsize)
+ AppendField(baseOffset, subobjectType);
+ else
+#endif
+ AppendBytes(nvsize);
}
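The tail-padding case the comment describes, for concreteness (offsets follow the Itanium ABI and are stated here as an assumption):

    struct A { int i; char c; A(); };  // non-POD: nvsize 5, sizeof 8
    struct B : A { char d; };          // 'd' may be placed at offset 5,
    // inside A's tail padding, so emitting the base as a full %struct.A
    // (8 bytes) could overlap 'd'; a [5 x i8] array never can.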
-void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
- uint64_t BaseOffset) {
- const ASTRecordLayout &Layout =
- Types.getContext().getASTRecordLayout(BaseDecl);
+void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return;
- uint64_t NonVirtualSize = Layout.getNonVirtualSize();
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializableAsBase) {
+ assert(IsZeroInitializable &&
+ "class zero-initializable as base but not as complete object");
- if (BaseDecl->isEmpty()) {
- // FIXME: Lay out empty bases.
- return;
+ IsZeroInitializable = IsZeroInitializableAsBase =
+ baseLayout.isZeroInitializableAsBase();
}
- CheckZeroInitializable(BaseDecl);
+ LayoutBase(base, baseLayout, baseOffset);
+ NonVirtualBases[base] = (FieldTypes.size() - 1);
+}
- // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
- AppendPadding(BaseOffset / 8, 1);
-
- // Append the base field.
- LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
+void
+CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializable)
+ IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
+
+ LayoutBase(base, baseLayout, baseOffset);
+ VirtualBases[base] = (FieldTypes.size() - 1);
+}
- AppendBytes(NonVirtualSize / 8);
+/// LayoutVirtualBases - lay out the virtual bases of a record decl.
+void
+CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We only want to lay out virtual bases that aren't indirect primary bases
+ // of some other base.
+ if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
+ // Only lay out the base once.
+ if (!LaidOutVirtualBases.insert(BaseDecl))
+ continue;
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ LayoutVirtualBase(BaseDecl, vbaseOffset);
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ LayoutVirtualBases(BaseDecl, Layout);
+ }
}
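A sketch of the indirect-primary-base case skipped above (usual Itanium layout assumed):

    struct A { virtual void f(); };
    struct B : virtual A { };  // A is B's primary base, virtually
    struct C : B { };
    // While laying out C, A is an indirect primary base: it already
    // lives in B's primary-base region, so laying it out again here
    // would allocate it twice.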
void
@@ -493,13 +615,14 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
/*isVarArg=*/true);
const llvm::Type *VTableTy = FunctionType->getPointerTo();
- assert(NextFieldOffsetInBytes == 0 &&
+ assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
- AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
} else {
- // FIXME: Handle a virtual primary base.
- if (!Layout.getPrimaryBaseWasVirtual())
- LayoutNonVirtualBase(PrimaryBase, 0);
+ if (!Layout.isPrimaryBaseVirtual())
+ LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
+ else
+ LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
}
}
@@ -513,20 +636,61 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// We've already laid out the primary base.
- if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
+ if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
continue;
LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
}
}
+bool
+CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
+
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ // First check if we can use the same fields as for the complete class.
+ CharUnits RecordSize = Layout.getSize();
+ if (AlignedNonVirtualTypeSize == RecordSize)
+ return true;
+
+ // Check if we need padding.
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
+ assert(!Packed && "cannot layout even as packed struct");
+ return false; // Needs packing.
+ }
+
+ bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
+ if (needsPadding) {
+ CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
+ FieldTypes.push_back(getByteArrayType(NumBytes));
+ }
+
+ BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
+ FieldTypes, Packed);
+
+ if (needsPadding) {
+ // Pull the padding back off.
+ FieldTypes.pop_back();
+ }
+
+ return true;
+}
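Illustrating the two types this distinguishes (the IR type names are illustrative):

    struct A { virtual ~A(); int x; };
    struct B : virtual A { char c; };
    // %struct.B      - complete-object type, includes the virtual A
    // %struct.B.base - non-virtual part only, used wherever B appears
    //                  as a base subobject of a more-derived class.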
+
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
assert(!D->isUnion() && "Can't call LayoutFields on a union!");
- assert(Alignment && "Did not set alignment!");
+ assert(!Alignment.isZero() && "Did not set alignment!");
const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
+ if (RD)
LayoutNonVirtualBases(RD, Layout);
unsigned FieldNo = 0;
@@ -540,8 +704,23 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
}
}
+ if (RD) {
+ // We've laid out the non-virtual bases and the fields, now compute the
+ // non-virtual base field types.
+ if (!ComputeNonVirtualBaseType(RD)) {
+ assert(!Packed && "Could not layout even with a packed LLVM struct!");
+ return false;
+ }
+
+ // And lay out the virtual bases.
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+ LayoutVirtualBases(RD, Layout);
+ }
+
// Append tail padding if necessary.
- AppendTailPadding(Layout.getSize());
+ AppendTailPadding(Types.getContext().toBits(Layout.getSize()));
return true;
}
@@ -549,129 +728,137 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
assert(RecordSize % 8 == 0 && "Invalid record size!");
- uint64_t RecordSizeInBytes = RecordSize / 8;
- assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+ CharUnits RecordSizeInBytes = CharUnits::fromQuantity(RecordSize / 8);
+ assert(NextFieldOffset <= RecordSizeInBytes && "Size mismatch!");
- uint64_t AlignedNextFieldOffset =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
if (AlignedNextFieldOffset == RecordSizeInBytes) {
// We don't need any padding.
return;
}
- unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ CharUnits NumPadBytes = RecordSizeInBytes - NextFieldOffset;
AppendBytes(NumPadBytes);
}
-void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
- const llvm::Type *FieldTy) {
- AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
- getTypeAlignment(FieldTy));
+void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
+ const llvm::Type *fieldType) {
+ CharUnits fieldSize =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
- uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
+ FieldTypes.push_back(fieldType);
- FieldTypes.push_back(FieldTy);
-
- NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
+ NextFieldOffset = fieldOffset + fieldSize;
BitsAvailableInLastField = 0;
}
-void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
- unsigned FieldAlignment) {
- assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
+void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
+ CharUnits fieldAlignment) {
+ assert(NextFieldOffset <= fieldOffset &&
"Incorrect field layout!");
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ if (alignedNextFieldOffset < fieldOffset) {
// Even with alignment, the field offset is not at the right place,
// insert padding.
- uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+ CharUnits padding = fieldOffset - NextFieldOffset;
- AppendBytes(PaddingInBytes);
+ AppendBytes(padding);
}
}
-void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
- if (NumBytes == 0)
- return;
+const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+ assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ if (numBytes > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
+
+ return Ty;
+}
+
+void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
+ if (numBytes.isZero())
+ return;
// Append the padding field
- AppendField(NextFieldOffsetInBytes, Ty);
+ AppendField(NextFieldOffset, getByteArrayType(numBytes));
}
-unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
if (Packed)
- return 1;
+ return CharUnits::One();
- return Types.getTargetData().getABITypeAlignment(Ty);
+ return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}
+CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
+ if (Packed)
+ return CharUnits::One();
+
+ CharUnits maxAlignment = CharUnits::One();
+ for (size_t i = 0; i != FieldTypes.size(); ++i)
+ maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
+
+ return maxAlignment;
+}
+
+/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
// This record already contains a member pointer.
- if (!IsZeroInitializable)
+ if (!IsZeroInitializableAsBase)
return;
// Can only have member pointers if we're compiling C++.
if (!Types.getContext().getLangOptions().CPlusPlus)
return;
- T = Types.getContext().getBaseElementType(T);
+ const Type *elementType = T->getBaseElementTypeUnsafe();
- if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
if (!Types.getCXXABI().isZeroInitializable(MPT))
- IsZeroInitializable = false;
- } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- CheckZeroInitializable(RD);
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
}
}
-void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
- // This record already contains a member pointer.
- if (!IsZeroInitializable)
- return;
-
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- Types.ConvertTagDeclType(RD);
-
- const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
-
- if (!Layout.isZeroInitializable())
- IsZeroInitializable = false;
-}
-
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
CGRecordLayoutBuilder Builder(*this);
Builder.Layout(D);
- const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
- Builder.FieldTypes,
- Builder.Packed);
+ const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
+ Builder.FieldTypes,
+ Builder.Packed);
+
+ // If we're in C++, compute the base subobject type.
+ const llvm::StructType *BaseTy = 0;
+ if (isa<CXXRecordDecl>(D)) {
+ BaseTy = Builder.BaseSubobjectType;
+ if (!BaseTy) BaseTy = Ty;
+ }
CGRecordLayout *RL =
- new CGRecordLayout(Ty, Builder.IsZeroInitializable);
+ new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
+ Builder.IsZeroInitializableAsBase);
- // Add all the non-virtual base field numbers.
- RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
- Builder.LLVMNonVirtualBases.end());
+ RL->NonVirtualBases.swap(Builder.NonVirtualBases);
+ RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
// Add all the field numbers.
- RL->FieldInfo.insert(Builder.LLVMFields.begin(),
- Builder.LLVMFields.end());
+ RL->FieldInfo.swap(Builder.Fields);
// Add bitfield info.
- RL->BitFields.insert(Builder.LLVMBitFields.begin(),
- Builder.LLVMBitFields.end());
+ RL->BitFields.swap(Builder.BitFields);
// Dump the layout, if requested.
if (getContext().getLangOptions().DumpRecordLayouts) {
@@ -684,10 +871,26 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
#ifndef NDEBUG
// Verify that the computed LLVM struct size matches the AST layout size.
- uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
+
+ uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
"Type size mismatch!");
+ if (BaseTy) {
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ uint64_t AlignedNonVirtualTypeSizeInBits =
+ getContext().toBits(AlignedNonVirtualTypeSize);
+
+ assert(AlignedNonVirtualTypeSizeInBits ==
+ getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ "Type size mismatch!");
+ }
+
// Verify that the LLVM and AST field offsets agree.
const llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
@@ -729,7 +932,9 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
void CGRecordLayout::print(llvm::raw_ostream &OS) const {
OS << "<CGRecordLayout\n";
- OS << " LLVMType:" << *LLVMType << "\n";
+ OS << " LLVMType:" << *CompleteObjectType << "\n";
+ if (BaseSubobjectType)
+ OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
OS << " BitFields:[\n";
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 16145f766af2..cd238112ed1d 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
@@ -69,24 +70,54 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitStopPoint(S);
switch (S->getStmtClass()) {
- default:
- // Must be an expression in a stmt context. Emit the value (to get
- // side-effects) and ignore the result.
- if (!isa<Expr>(S))
- ErrorUnsupported(S, "statement");
-
- EmitAnyExpr(cast<Expr>(S), 0, false, true);
-
- // Expression emitters don't handle unreachable blocks yet, so look for one
- // explicitly here. This handles the common case of a call to a noreturn
- // function.
- if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty()) {
- CurBB->eraseFromParent();
- Builder.ClearInsertionPoint();
- }
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
}
break;
+ }
+
case Stmt::IndirectGotoStmtClass:
EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
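The common case being optimized, as a minimal sketch:

    #include <stdlib.h>
    void f(void) {
      exit(1);  // noreturn: the fresh block opened after the call has no
                // predecessors, so it is erased and the insertion point
                // cleared; nothing is emitted for code following it.
    }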
@@ -146,7 +177,7 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- llvm::Value *AggLoc, bool isAggVol) {
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -178,13 +209,13 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
// emitting them before we evaluate the subexpr.
const Stmt *LastStmt = S.body_back();
while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
- EmitLabel(*LS);
+ EmitLabel(LS->getDecl());
LastStmt = LS->getSubStmt();
}
EnsureInsertPoint();
- RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
}
return RV;
@@ -246,24 +277,24 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
}
CodeGenFunction::JumpDest
-CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
- JumpDest &Dest = LabelMap[S];
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
if (Dest.isValid()) return Dest;
// Create, but don't insert, the new block.
- Dest = JumpDest(createBasicBlock(S->getName()),
+ Dest = JumpDest(createBasicBlock(D->getName()),
EHScopeStack::stable_iterator::invalid(),
NextCleanupDestIndex++);
return Dest;
}
-void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- JumpDest &Dest = LabelMap[&S];
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
// If we didn't need a forward reference to this label, just go
// ahead and create a destination at the current scope.
if (!Dest.isValid()) {
- Dest = getJumpDestInCurrentScope(S.getName());
+ Dest = getJumpDestInCurrentScope(D->getName());
// Otherwise, we need to give this label a target depth and remove
// it from the branch-fixups list.
@@ -281,7 +312,7 @@ void CodeGenFunction::EmitLabel(const LabelStmt &S) {
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
- EmitLabel(S);
+ EmitLabel(S.getDecl());
EmitStmt(S.getSubStmt());
}
@@ -297,10 +328,14 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
// Ensure that we have an i8* for our PHI node.
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
- llvm::Type::getInt8PtrTy(VMContext),
- "addr");
+ Int8PtrTy, "addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
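The new fast path covers GNU C where the computed-goto target folds to a known label, e.g.:

    void g(void) {
      goto *&&done;  // constant target: lowered to a direct branch
                     // through cleanups, not an indirect goto
    done:;
    }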
@@ -320,7 +355,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
@@ -395,7 +430,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -527,7 +562,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// declaration.
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (S.getConditionVariable()) {
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
}
// If there are any cleanups between here and the loop-exit scope,
@@ -621,11 +656,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// If there is an NRVO flag for this variable, set it to 1 into indicate
// that the cleanup code should not destroy the variable.
- if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
- const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
- llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
- Builder.CreateStore(One, NRVOFlag);
- }
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateStore(Builder.getTrue(), NRVOFlag);
} else if (!ReturnValue) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
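Builder.getTrue() replaces the hand-rolled i1 constant. The flag itself guards NRVO'd locals, roughly:

    struct T { ~T(); int buf[16]; };
    T make(bool b) {
      T r;              // NRVO candidate: constructed in the return slot
      if (b) return r;  // each return stores true to the NRVO flag so the
      return r;         // scope's cleanup skips ~T() for 'r'
    }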
@@ -643,7 +675,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, ReturnValue, false);
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -713,7 +745,8 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
// Range is small enough to add multiple switch instruction cases.
for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), LHS),
+ CaseDest);
LHS++;
}
return;
@@ -735,10 +768,10 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// Emit range check.
llvm::Value *Diff =
Builder.CreateSub(SwitchInsn->getCondition(),
- llvm::ConstantInt::get(VMContext, LHS), "tmp");
+ llvm::ConstantInt::get(getLLVMContext(), LHS), "tmp");
llvm::Value *Cond =
- Builder.CreateICmpULE(Diff,
- llvm::ConstantInt::get(VMContext, Range), "tmp");
+ Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(getLLVMContext(), Range),
+ "inbounds");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
// Restore the appropriate insertion point.
@@ -757,7 +790,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
// Recursively emitting the statement is acceptable, but is not wonderful for
// code where we have many case statements nested together, i.e.:
@@ -775,7 +809,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
while (NextCase && NextCase->getRHS() == 0) {
CurCase = NextCase;
CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
@@ -798,7 +833,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
llvm::Value *CondV = EmitScalarExpr(S.getCond());
@@ -861,14 +896,11 @@ static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
- std::string tmp;
while (*Constraint) {
switch (*Constraint) {
default:
- tmp = Target.convertConstraint(*Constraint);
- if (Result.find(tmp) == std::string::npos) // Combine unique constraints
- Result += tmp;
+ Result += Target.convertConstraint(*Constraint);
break;
// Ignore these
case '*':
@@ -877,8 +909,8 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
case '=': // Will see this and the following in mult-alt constraints.
case '+':
break;
- case ',': // FIXME - Until the back-end properly supports
- return Result; // multiple alternative constraints, we stop here.
+ case ',':
+ Result += "|";
break;
case 'g':
Result += "imr";
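With the back end now handling multi-alternative constraints, the list is forwarded whole rather than truncated at the first comma; a sketch:

    int x, y;
    __asm__("mov %1, %0" : "=r,m"(x) : "r,r"(y));
    // previously only the first alternative ("r") survived; the full
    // lists now reach LLVM as "r|m" and "r|r".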
@@ -890,7 +922,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
bool result = Target.resolveSymbolicName(Constraint,
&(*OutCons)[0],
OutCons->size(), Index);
- assert(result && "Could not resolve symbolic name"); result=result;
+ assert(result && "Could not resolve symbolic name"); (void)result;
Result += llvm::utostr(Index);
break;
}
@@ -902,6 +934,33 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
return Result;
}
+/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
+/// as using a particular register, add that register as a constraint to be
+/// used in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ llvm::StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // FIXME: We should check which registers are compatible with "r" or "x".
+ if (Constraint != "r" && Constraint != "x") {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ return "{" + Register.str() + "}";
+}
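This targets GNU explicit-register variables; for example (the register name is illustrative and target-specific):

    register unsigned long tmp asm("r10");
    __asm__ volatile("" : "+r"(tmp));
    // the "r" constraint on 'tmp' is rewritten to "{r10}", pinning the
    // operand to that register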
+
llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
@@ -915,7 +974,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
- Ty = llvm::IntegerType::get(VMContext, Size);
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
@@ -946,6 +1005,35 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
}
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ llvm::SmallVector<llvm::Value *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ Str->getLocStart().getRawEncoding()));
+ llvm::StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOptions();
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
+ CGF.Target);
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ LineLoc.getRawEncoding()));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs.data(), Locs.size());
+}
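So for a multi-line asm such as the following, !srcloc carries one encoded SourceLocation per line, letting backend diagnostics point at the offending line ("bad" below is a deliberately invalid mnemonic):

    __asm__("nop\n\t"
            "bad %0" : : "r"(0));
    // !srcloc = { start of line 1, start of line 2 }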
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
@@ -1010,6 +1098,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, Target,
+ CGM, S);
+
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
Constraints += ',';
@@ -1044,6 +1135,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
} else {
ArgTypes.push_back(Dest.getAddress()->getType());
Args.push_back(Dest.getAddress());
@@ -1083,6 +1178,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
&OutputConstraintInfos);
+ InputConstraint =
+ AddVariableConstraints(InputConstraint,
+ *InputExpr->IgnoreParenNoopCasts(getContext()),
+ Target, CGM, S);
+
llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
@@ -1107,7 +1207,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
-
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
@@ -1145,11 +1248,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const llvm::Type *ResultType;
if (ResultRegTypes.empty())
- ResultType = llvm::Type::getVoidTy(VMContext);
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
else if (ResultRegTypes.size() == 1)
ResultType = ResultRegTypes[0];
else
- ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
const llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
@@ -1162,10 +1265,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
- unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
- llvm::Value *LocIDC =
- llvm::ConstantInt::get(Int32Ty, LocID);
- Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
@@ -1192,16 +1292,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)ResSize));
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)TmpSize));
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
} else if (TruncTy->isIntegerTy()) {
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
}
}
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index dfb8dc63c5c5..3b4c50910bf8 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -16,39 +16,11 @@ using namespace clang;
using namespace CodeGen;
namespace {
- struct DestroyTemporary : EHScopeStack::Cleanup {
- const CXXTemporary *Temporary;
- llvm::Value *Addr;
- llvm::Value *CondPtr;
-
- DestroyTemporary(const CXXTemporary *Temporary, llvm::Value *Addr,
- llvm::Value *CondPtr)
- : Temporary(Temporary), Addr(Addr), CondPtr(CondPtr) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::BasicBlock *CondEnd = 0;
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (CondPtr) {
- llvm::BasicBlock *CondBlock =
- CGF.createBasicBlock("temp.cond-dtor.call");
- CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
-
- llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
- CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- CGF.EmitBlock(CondBlock);
- }
-
- CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Addr);
-
- if (CondPtr) {
- // Reset the condition to false.
- CGF.Builder.CreateStore(CGF.Builder.getFalse(), CondPtr);
- CGF.EmitBlock(CondEnd);
- }
+ struct DestroyTemporary {
+ static void Emit(CodeGenFunction &CGF, bool forEH,
+ const CXXDestructorDecl *dtor, llvm::Value *addr) {
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ addr);
}
};
}
@@ -56,37 +28,19 @@ namespace {
/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
llvm::Value *Ptr) {
- llvm::AllocaInst *CondPtr = 0;
-
- // Check if temporaries need to be conditional. If so, we'll create a
- // condition boolean, initialize it to 0 and
- if (ConditionalBranchLevel != 0) {
- CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
-
- // Initialize it to false. This initialization takes place right after
- // the alloca insert point.
- InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
-
- // Now set it to true.
- Builder.CreateStore(Builder.getTrue(), CondPtr);
- }
-
- EHStack.pushCleanup<DestroyTemporary>(NormalAndEHCleanup,
- Temporary, Ptr, CondPtr);
+ pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup,
+ Temporary->getDestructor(),
+ Ptr);
}
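The deleted condition-flag machinery is now provided generically by pushFullExprCleanup, which activates the cleanup only on paths that actually created the temporary, as in:

    struct T { ~T(); };
    void g(bool b) {
      int x = b ? (T(), 1) : 2;  // ~T() must run only when 'b' is true,
                                 // so the pushed cleanup is conditional
    }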
RValue
-CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
- llvm::Value *AggLoc,
- bool IsAggLocVolatile,
- bool IsInitializer) {
+CodeGenFunction::EmitExprWithCleanups(const ExprWithCleanups *E,
+ AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
- /*IgnoreResult=*/false, IsInitializer);
+ return EmitAnyExpr(E->getSubExpr(), Slot);
}
-LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
- const CXXExprWithTemporaries *E) {
+LValue CodeGenFunction::EmitExprWithCleanupsLValue(const ExprWithCleanups *E) {
RunCleanupsScope Scope(*this);
return EmitLValue(E->getSubExpr());
}
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 56acfc84802e..78b2dbe2ba22 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -218,7 +218,7 @@ void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
+ Layout.getBaseClassOffsetInBits(BaseDecl);
// Layout the VTT for this base.
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
@@ -262,14 +262,15 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
if (!VBases.insert(BaseDecl))
continue;
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
BaseDeclIsMorallyVirtual = true;
} else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffset =
+ Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
- if (!Layout.getPrimaryBaseWasVirtual() &&
+ if (!Layout.isPrimaryBaseVirtual() &&
Layout.getPrimaryBase() == BaseDecl)
BaseDeclIsNonVirtualPrimaryBase = true;
}
@@ -316,7 +317,7 @@ void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
continue;
uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
}
@@ -365,57 +366,52 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
}
-llvm::GlobalVariable *
-CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD) {
- // Only classes that have virtual bases need a VTT.
- if (RD->getNumVBases() == 0)
- return 0;
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true);
- llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, OutName);
- llvm::StringRef Name = OutName.str();
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ const llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::Constant *Init =
+ llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents().data(),
+ Builder.getVTTComponents().size());
+
+ VTT->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTT->setLinkage(Linkage);
- D1(printf("vtt %s\n", RD->getNameAsCString()));
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTT, RD, CodeGenModule::TVK_ForVTT);
+}
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
- if (GV == 0 || GV->isDeclaration()) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
- VTTBuilder Builder(CGM, RD, GenerateDefinition);
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
- const llvm::ArrayType *Type =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
- llvm::Constant *Init = 0;
- if (GenerateDefinition)
- Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(),
- Builder.getVTTComponents().size());
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ const llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
- llvm::GlobalVariable *OldGV = GV;
- GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
- Linkage, Init, Name);
- CGM.setGlobalVisibility(GV, RD);
-
- if (OldGV) {
- GV->takeName(OldGV);
- llvm::Constant *NewPtr =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtr);
- OldGV->eraseFromParent();
- }
- }
-
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
return GV;
}
-llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) {
- return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
- /*GenerateDefinition=*/false, RD);
-}
-
bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
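
The monolithic GenerateVTT above is split into GetAddrOfVTT, which only declares the global through CreateOrReplaceCXXRuntimeVariable, and EmitVTTDefinition, which fills in the initializer, linkage, and visibility. As a reminder of what a VTT is for, here is a minimal hierarchy that needs one (illustrative only, not code from this patch):

    struct A { int a; };
    struct B : virtual A { int b; };
    struct C : virtual A { int c; };
    struct D : B, C { int d; };

    // On Itanium-ABI targets D gets a VTT (mangled _ZTT1D): an array of
    // vtable pointers that D's constructor passes, as a hidden argument, to
    // the base-object constructors of B and C so they can locate the shared
    // virtual A subobject.
    D d;
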
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index bed4670f7f95..891697f4cd09 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -240,7 +240,7 @@ static BaseOffset ComputeBaseOffset(ASTContext &Context,
const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
- NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ NonVirtualOffset += Layout.getBaseClassOffsetInBits(Base);
}
// FIXME: This should probably use CharUnits or something. Maybe we should
@@ -358,12 +358,12 @@ FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- uint64_t Offset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t Offset = Layout.getBaseClassOffsetInBits(BaseDecl);
BaseOffset = Base.getBaseOffset() + Offset;
BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
@@ -396,9 +396,9 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
continue;
}
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
- BaseOffset = Layout.getBaseClassOffset(BaseDecl) +
+ BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl) +
Base.getBaseOffset();
}
@@ -793,22 +793,22 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
// (Since we're emitting the vcall and vbase offsets in reverse order, we'll
// emit them for the primary base first).
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- bool PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+ bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
uint64_t PrimaryBaseOffset;
// Get the base offset of the primary base.
if (PrimaryBaseIsVirtual) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
@@ -849,9 +849,9 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Handle the primary base first.
// We only want to add vcall offsets if the base is non-virtual; a virtual
// primary base will have its vcall and vbase offsets emitted already.
- if (PrimaryBase && !Layout.getPrimaryBaseWasVirtual()) {
+ if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
// Get the base offset of the primary base.
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
@@ -903,7 +903,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Get the base offset of this base.
uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
+ Layout.getBaseClassOffsetInBits(BaseDecl);
AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
}
@@ -924,7 +924,7 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
// FIXME: We shouldn't use / 8 here.
int64_t Offset =
- (int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
+ (int64_t)(LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl) -
OffsetInLayoutClass) / 8;
// Add the vbase offset offset.
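
These renames also make the unit explicit: ASTRecordLayout hands back bit offsets, and call sites such as the vbase-offset computation above still divide by 8 (per the FIXME) to get bytes. For orientation, a minimal hierarchy whose vtable actually carries a vbase offset (illustrative, not from the patch):

    struct A { virtual ~A(); long a; };
    struct B : virtual A { virtual void f(); };

    // On Itanium-ABI targets, B's vtable stores, below the address point,
    // the offset from a B* to its virtual A subobject. The derived-to-base
    // conversion below loads that vbase offset at run time:
    A *toBase(B *p) { return p; }
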
@@ -1372,7 +1372,7 @@ VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
/// Get the virtual base offset, relative to the most derived class
/// layout.
OffsetToBaseSubobject +=
- LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(Offset.VirtualBase);
} else {
// Otherwise, the non-virtual offset is relative to the derived class
// offset.
@@ -1520,8 +1520,8 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
if (!PrimaryBase)
break;
- if (Layout.getPrimaryBaseWasVirtual()) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should always be at offset 0!");
const ASTRecordLayout &LayoutClassLayout =
@@ -1529,13 +1529,13 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
// Now check if this is the primary base that is not a primary base in the
// most derived class.
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
FirstBaseOffsetInLayoutClass) {
// We found it, stop walking the chain.
break;
}
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should always be at offset 0!");
}
@@ -1586,23 +1586,23 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
uint64_t PrimaryBaseOffset;
uint64_t PrimaryBaseOffsetInLayoutClass;
- if (Layout.getPrimaryBaseWasVirtual()) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
@@ -1664,9 +1664,18 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
if (ThisAdjustment.VCallOffsetOffset &&
Overrider.Method->getParent() == MostDerivedClass) {
+
+      // There's no return adjustment between OverriddenMD and MD,
+      // but that doesn't mean there isn't one between MD and
+      // the final overrider.
+ BaseOffset ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
// This is a virtual thunk for the most derived class, add it.
AddThunk(Overrider.Method,
- ThunkInfo(ThisAdjustment, ReturnAdjustment()));
+ ThunkInfo(ThisAdjustment, ReturnAdjustment));
}
}
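
The fix here computes a real return adjustment for virtual thunks in the most derived class instead of assuming an empty one. A minimal case where a single thunk needs both kinds of adjustment (illustrative only):

    struct A { virtual A *clone(); long pad; };

    // B::clone covariantly overrides A::clone across a virtual base, so the
    // A-in-B vtable slot needs a thunk with both adjustments: a virtual
    // 'this' adjustment (through a vcall offset) on entry, and a return
    // adjustment (through a vbase offset) converting the returned B* back
    // to A*.
    struct B : virtual A { virtual B *clone(); };
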
@@ -1779,13 +1788,13 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
if (!PrimaryBase)
break;
- if (Layout.getPrimaryBaseWasVirtual()) {
+ if (Layout.isPrimaryBaseVirtual()) {
// Check if this virtual primary base is a primary base in the layout
// class. If it's not, we don't want to add it.
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
OffsetInLayoutClass) {
// We don't want to add this class (or any of its primary bases).
break;
@@ -1835,7 +1844,7 @@ void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
}
// Get the base offset of this base.
- uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t RelativeBaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
@@ -1866,7 +1875,7 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
// Check if it's virtual.
- if (Layout.getPrimaryBaseWasVirtual()) {
+ if (Layout.isPrimaryBaseVirtual()) {
bool IsPrimaryVirtualBase = true;
if (isBuildingConstructorVTable()) {
@@ -1876,7 +1885,7 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
Context.getASTRecordLayout(LayoutClass);
uint64_t PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
// We know that the base is not a primary base in the layout class if
// the base offsets are different.
@@ -1904,10 +1913,11 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- BaseOffsetInLayoutClass = LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
BaseOffsetInLayoutClass =
- OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ OffsetInLayoutClass + Layout.getBaseClassOffsetInBits(BaseDecl);
}
DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
@@ -1933,12 +1943,12 @@ VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
uint64_t BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
/*BaseIsMorallyVirtual=*/true,
@@ -2230,6 +2240,19 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
}
+static void
+CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
+ while (RD) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ if (PrimaryBase)
+ PrimaryBases.insert(PrimaryBase);
+
+ RD = PrimaryBase;
+ }
+}
+
void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
// Itanium C++ ABI 2.5.2:
@@ -2258,10 +2281,7 @@ void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
// Collect all the primary bases, so we can check whether methods override
// a method from the base.
VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
- for (ASTRecordLayout::primary_base_info_iterator
- I = Layout.primary_base_begin(), E = Layout.primary_base_end();
- I != E; ++I)
- PrimaryBases.insert((*I).getBase());
+ CollectPrimaryBases(RD, CGM.getContext(), PrimaryBases);
const CXXDestructorDecl *ImplicitVirtualDtor = 0;
@@ -2336,6 +2356,33 @@ void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
NumVirtualFunctionPointers[RD] = CurrentIndex;
}
+bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
+  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return false;
+
+ const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+ if (!KeyFunction)
+ return true;
+
+ // Itanium C++ ABI, 5.2.6 Instantiated Templates:
+ // An instantiation of a class template requires:
+ // - In the object where instantiated, the virtual table...
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return true;
+
+  // If we're building with optimization, we always emit VTables since that
+  // allows virtual function calls to be devirtualized.
+  // (We don't want to do this in -fapple-kext mode, however.)
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOptions().AppleKext)
+ return true;
+
+ return KeyFunction->hasBody();
+}
+
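
ShouldEmitVTableInThisTU replaces the isKeyFunctionInAnotherTU helper removed from the header below and folds in the template-specialization and optimization special cases. From the user's side, the baseline key-function rule looks like this (file names hypothetical):

    // widget.h
    struct Widget {
      virtual void draw();   // first non-inline virtual function: the key
      virtual ~Widget();     // function of the class
    };

    // widget.cpp -- defines the key function, so this translation unit
    // emits Widget's vtable; with optimization enabled, the new rule lets
    // other TUs emit it as well so virtual calls can be devirtualized.
    void Widget::draw() {}
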
uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
NumVirtualFunctionPointers.find(RD);
@@ -2409,14 +2456,16 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
// Compute the mangled name.
llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
- Thunk.This, Name);
+ Thunk.This, Out);
else
- getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Name);
-
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
+ Out.flush();
+
const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
- return GetOrCreateLLVMFunction(Name, Ty, GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}
static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
@@ -2563,7 +2612,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
const llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
FPT->isVariadic());
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
const CGFunctionInfo &FnInfo =
CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
@@ -2621,7 +2670,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
}
if (!ResultType->isVoidType() && Slot.isNull())
- CGM.getCXXABI().EmitReturnFromThunk(CGF, RV, ResultType);
+ CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
FinishFunction();
@@ -2632,7 +2681,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
setThunkVisibility(CGM, MD, Thunk, Fn);
}
-void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage)
{
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
@@ -2667,9 +2717,42 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
OldThunkFn->eraseFromParent();
}
- // Actually generate the thunk body.
llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+
+ if (!ThunkFn->isDeclaration()) {
+ if (UseAvailableExternallyLinkage) {
+ // There is already a thunk emitted for this function, do nothing.
+      // A thunk has already been emitted for this function; do nothing.
+ }
+
+ // If a function has a body, it should have available_externally linkage.
+ assert(ThunkFn->hasAvailableExternallyLinkage() &&
+ "Function should have available_externally linkage!");
+
+ // Change the linkage.
+ CGM.setFunctionLinkage(cast<CXXMethodDecl>(GD.getDecl()), ThunkFn);
+ return;
+ }
+
+ // Actually generate the thunk body.
CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+
+ if (UseAvailableExternallyLinkage)
+ ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
+}
+
+void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // We only want to do this when building with optimizations.
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // We can't emit thunks for member functions with incomplete types.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ if (CGM.getTypes().VerifyFuncTypeComplete(MD->getType().getTypePtr()))
+ return;
+
+ EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
}
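
available_externally linkage gives the optimizer a function body it may inline while guaranteeing the symbol itself is not emitted into the object file. A sketch of the kind of code this helps (hypothetical types; the thunk itself is compiler-generated):

    struct Left  { virtual void f(); long pad; };
    struct Right { virtual void g(); };
    struct Both : Left, Right { virtual void g(); };

    // The Right-in-Both vtable slot for g points at a this-adjusting thunk.
    // With optimization, a TU that merely calls g through that slot can
    // inline through the thunk, because a body for it was emitted here
    // with available_externally linkage.
    void call(Both *b) { static_cast<Right *>(b)->g(); }
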
void CodeGenVTables::EmitThunks(GlobalDecl GD)
@@ -2694,7 +2777,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
const ThunkInfoVectorTy &ThunkInfoVector = I->second;
for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
- EmitThunk(GD, ThunkInfoVector[I]);
+ EmitThunk(GD, ThunkInfoVector[I], /*UseAvailableExternallyLinkage=*/false);
}
void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
@@ -2703,9 +2786,7 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
// We may need to generate a definition for this vtable.
if (RequireVTable && !Entry.getInt()) {
- if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
- RD->getTemplateSpecializationKind()
- != TSK_ExplicitInstantiationDeclaration)
+ if (ShouldEmitVTableInThisTU(RD))
CGM.DeferredVTables.push_back(RD);
Entry.setInt(true);
@@ -2719,7 +2800,14 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
+  // -fapple-kext adds an extra entry at the end of the vtable.
+ bool IsAppleKext = CGM.getContext().getLangOptions().AppleKext;
+ if (IsAppleKext)
+ NumVTableComponents += 1;
+
uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
+ if (IsAppleKext)
+ LayoutData[NumVTableComponents] = 0;
Entry.setPointer(LayoutData);
// Store the number of components.
@@ -2861,12 +2949,13 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
Init = CGM.GetAddrOfThunk(GD, Thunk);
-
+ MaybeEmitThunkAvailableExternally(GD, Thunk);
+
NextVTableThunkIndex++;
} else {
const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
- Init = CGM.GetAddrOfFunction(GD, Ty);
+ Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
}
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
@@ -2886,52 +2975,11 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
}
-/// GetGlobalVariable - Will return a global variable of the given type.
-/// If a variable with a different type already exists then a new variable
-/// with the right type will be created.
-/// FIXME: We should move this to CodeGenModule and rename it to something
-/// better and then use it in CGVTT and CGRTTI.
-static llvm::GlobalVariable *
-GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
- const llvm::Type *Ty,
- llvm::GlobalValue::LinkageTypes Linkage) {
-
- llvm::GlobalVariable *GV = Module.getNamedGlobal(Name);
- llvm::GlobalVariable *OldGV = 0;
-
- if (GV) {
- // Check if the variable has the right type.
- if (GV->getType()->getElementType() == Ty)
- return GV;
-
- assert(GV->isDeclaration() && "Declaration has wrong type!");
-
- OldGV = GV;
- }
-
- // Create a new variable.
- GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true,
- Linkage, 0, Name);
-
- if (OldGV) {
- // Replace occurrences of the old variable if needed.
- GV->takeName(OldGV);
-
- if (!OldGV->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtrForOldDecl);
- }
-
- OldGV->eraseFromParent();
- }
-
- return GV;
-}
-
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
ComputeVTableRelatedInformation(RD, true);
@@ -2940,8 +2988,11 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
- return GetGlobalVariable(CGM.getModule(), Name, ArrayType,
- llvm::GlobalValue::ExternalLinkage);
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
+ return GV;
}
void
@@ -2970,7 +3021,7 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
VTable->setLinkage(Linkage);
// Set the right visibility.
- CGM.setTypeVisibility(VTable, RD, /*ForRTTI*/ false);
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
}
llvm::GlobalVariable *
@@ -2991,8 +3042,10 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Get the mangled construction vtable name.
llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().
- mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), OutName);
+ mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
@@ -3001,8 +3054,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
- GetGlobalVariable(CGM.getModule(), Name, ArrayType,
- llvm::GlobalValue::InternalLinkage);
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::InternalLinkage);
// Add the thunks.
VTableThunksTy VTableThunks;
@@ -3034,7 +3087,10 @@ CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
VTable = GetAddrOfVTable(RD);
EmitVTableDefinition(VTable, Linkage, RD);
- GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+ if (RD->getNumVBases()) {
+ llvm::GlobalVariable *VTT = GetAddrOfVTT(RD);
+ EmitVTTDefinition(VTT, Linkage, RD);
+ }
// If this is the magic class __cxxabiv1::__fundamental_type_info,
// we will emit the typeinfo for the fundamental types. This is the
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index abcafd6c9c41..7c119fa42241 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/GlobalVariable.h"
+#include "clang/Basic/ABI.h"
#include "GlobalDecl.h"
namespace clang {
@@ -24,94 +25,6 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
-/// ReturnAdjustment - A return adjustment.
-struct ReturnAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
- /// of the virtual base class offset.
- int64_t VBaseOffsetOffset;
-
- ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
-
- friend bool operator==(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
- }
-
- friend bool operator<(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
- }
-};
-
-/// ThisAdjustment - A 'this' pointer adjustment.
-struct ThisAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
- /// of the virtual call offset.
- int64_t VCallOffsetOffset;
-
- ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
-
- friend bool operator==(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
- }
-
- friend bool operator<(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
- }
-};
-
-/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
-/// adjustment for a thunk.
-struct ThunkInfo {
- /// This - The 'this' pointer adjustment.
- ThisAdjustment This;
-
- /// Return - The return adjustment.
- ReturnAdjustment Return;
-
- ThunkInfo() { }
-
- ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
- : This(This), Return(Return) { }
-
- friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- return LHS.This == RHS.This && LHS.Return == RHS.Return;
- }
-
- friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- if (LHS.This < RHS.This)
- return true;
-
- return LHS.This == RHS.This && LHS.Return < RHS.Return;
- }
-
- bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
-};
-
// BaseSubobject - Uniquely identifies a direct or indirect base class.
// Stores both the base class decl and the offset from the most derived class to
// the base class.
@@ -269,13 +182,16 @@ class CodeGenVTables {
void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
- llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD);
-
/// EmitThunk - Emit a single thunk.
- void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
-
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage);
+
+ /// MaybeEmitThunkAvailableExternally - Try to emit the given thunk with
+ /// available_externally linkage to allow for inlining of thunks.
+  /// This will be done iff optimizations are enabled and the member function's
+  /// type doesn't contain any incomplete types.
+ void MaybeEmitThunkAvailableExternally(GlobalDecl GD, const ThunkInfo &Thunk);
+
/// ComputeVTableRelatedInformation - Compute and store all vtable related
/// information (vtable layout, vbase offset offsets, thunks etc) for the
/// given record decl.
@@ -295,14 +211,9 @@ public:
CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM) { }
- // isKeyFunctionInAnotherTU - True if this record has a key function and it is
- // in another translation unit.
- static bool isKeyFunctionInAnotherTU(ASTContext &Context,
- const CXXRecordDecl *RD) {
- assert (RD->isDynamicClass() && "Non dynamic classes have no key.");
- const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- return KeyFunction && !KeyFunction->hasBody();
- }
+  /// \brief True if the VTable of this record must be emitted in this
+  /// translation unit.
+ bool ShouldEmitVTableInThisTU(const CXXRecordDecl *RD);
/// needsVTTParameter - Return whether the given global decl needs a VTT
/// parameter, which it does if it's a base constructor or destructor with
@@ -349,8 +260,15 @@ public:
GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
bool BaseIsVirtual,
VTableAddressPointsMapTy& AddressPoints);
-
- llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
+
+
+  /// GetAddrOfVTT - Get the address of the VTT for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD);
+
+  /// EmitVTTDefinition - Emit the definition of the given VTT.
+ void EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
/// EmitThunks - Emit the associated thunks for the given global decl.
void EmitThunks(GlobalDecl GD);
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index f57ecd245f09..7f77d552032a 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -25,7 +25,6 @@ namespace llvm {
namespace clang {
class ObjCPropertyRefExpr;
- class ObjCImplicitSetterGetterRefExpr;
namespace CodeGen {
class CGBitFieldInfo;
@@ -109,10 +108,8 @@ class LValue {
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- PropertyRef, // This is an Objective-C property reference, use
+ PropertyRef // This is an Objective-C property reference, use
// getPropertyRefExpr
- KVCRef // This is an objective-c 'implicit' property ref,
- // use getKVCRefExpr
} LVType;
llvm::Value *V;
@@ -129,9 +126,6 @@ class LValue {
// Obj-C property reference expression
const ObjCPropertyRefExpr *PropertyRefExpr;
-
- // ObjC 'implicit' property reference expression
- const ObjCImplicitSetterGetterRefExpr *KVCRefExpr;
};
// 'const' is unused here
@@ -157,8 +151,13 @@ class LValue {
bool ThreadLocalRef : 1;
Expr *BaseIvarExp;
+
+ /// TBAAInfo - TBAA information to attach to dereferences of this LValue.
+ llvm::MDNode *TBAAInfo;
+
private:
- void Initialize(Qualifiers Quals, unsigned Alignment = 0) {
+ void Initialize(Qualifiers Quals, unsigned Alignment = 0,
+ llvm::MDNode *TBAAInfo = 0) {
this->Quals = Quals;
this->Alignment = Alignment;
assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
@@ -167,6 +166,7 @@ private:
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
this->ThreadLocalRef = false;
this->BaseIvarExp = 0;
+ this->TBAAInfo = TBAAInfo;
}
public:
@@ -175,7 +175,6 @@ public:
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
bool isPropertyRef() const { return LVType == PropertyRef; }
- bool isKVCRef() const { return LVType == KVCRef; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -208,6 +207,9 @@ public:
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+ llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
+ void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
+
const Qualifiers &getQuals() const { return Quals; }
Qualifiers &getQuals() { return Quals; }
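
LValue now carries an optional TBAA node that scalar loads and stores can attach as !tbaa metadata. A sketch of how a call site might thread it through, assuming the CodeGenModule::getTBAAInfo(QualType) helper that the new CodeGenTBAA support in this import provides (names of the locals are hypothetical):

    // Inside some CodeGenFunction code path, given an address and a type:
    llvm::MDNode *TBAAInfo = CGM.getTBAAInfo(T);
    LValue LV = LValue::MakeAddr(Addr, T, /*Alignment=*/0, getContext(),
                                 TBAAInfo);
    // The load or store later emitted for LV can then be decorated with
    // the metadata retrieved via LV.getTBAAInfo().
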
@@ -240,26 +242,25 @@ public:
}
// property ref lvalue
+ llvm::Value *getPropertyRefBaseAddr() const {
+ assert(isPropertyRef());
+ return V;
+ }
const ObjCPropertyRefExpr *getPropertyRefExpr() const {
assert(isPropertyRef());
return PropertyRefExpr;
}
- // 'implicit' property ref lvalue
- const ObjCImplicitSetterGetterRefExpr *getKVCRefExpr() const {
- assert(isKVCRef());
- return KVCRefExpr;
- }
-
static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
- ASTContext &Context) {
- Qualifiers Quals = Context.getCanonicalType(T).getQualifiers();
+ ASTContext &Context,
+ llvm::MDNode *TBAAInfo = 0) {
+ Qualifiers Quals = T.getQualifiers();
Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
LValue R;
R.LVType = Simple;
R.V = V;
- R.Initialize(Quals, Alignment);
+ R.Initialize(Quals, Alignment, TBAAInfo);
return R;
}
@@ -303,21 +304,98 @@ public:
// the lvalue. However, this complicates the code a bit, and I haven't figured
// out how to make it go wrong yet.
static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
- unsigned CVR) {
+ llvm::Value *Base) {
LValue R;
R.LVType = PropertyRef;
+ R.V = Base;
R.PropertyRefExpr = E;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers());
return R;
}
+};
- static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
- unsigned CVR) {
- LValue R;
- R.LVType = KVCRef;
- R.KVCRefExpr = E;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
- return R;
+/// An aggregate value slot.
+class AggValueSlot {
+ /// The address.
+ llvm::Value *Addr;
+
+ // Associated flags.
+ bool VolatileFlag : 1;
+ bool LifetimeFlag : 1;
+ bool RequiresGCollection : 1;
+
+ /// IsZeroed - This is set to true if the destination is known to be zero
+ /// before the assignment into it. This means that zero fields don't need to
+ /// be set.
+ bool IsZeroed : 1;
+
+public:
+ /// ignored - Returns an aggregate value slot indicating that the
+ /// aggregate value is being ignored.
+ static AggValueSlot ignored() {
+ AggValueSlot AV;
+ AV.Addr = 0;
+ AV.VolatileFlag = AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
+ return AV;
+ }
+
+ /// forAddr - Make a slot for an aggregate value.
+ ///
+ /// \param Volatile - true if the slot should be volatile-initialized
+ /// \param LifetimeExternallyManaged - true if the slot's lifetime
+ /// is being externally managed; false if a destructor should be
+ /// registered for any temporaries evaluated into the slot
+  /// \param RequiresGCollection - true if the slot is located
+  /// somewhere for which ObjC GC calls should be emitted
+ static AggValueSlot forAddr(llvm::Value *Addr, bool Volatile,
+ bool LifetimeExternallyManaged,
+ bool RequiresGCollection = false,
+ bool IsZeroed = false) {
+ AggValueSlot AV;
+ AV.Addr = Addr;
+ AV.VolatileFlag = Volatile;
+ AV.LifetimeFlag = LifetimeExternallyManaged;
+ AV.RequiresGCollection = RequiresGCollection;
+ AV.IsZeroed = IsZeroed;
+ return AV;
+ }
+
+ static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
+ bool RequiresGCollection = false) {
+ return forAddr(LV.getAddress(), LV.isVolatileQualified(),
+ LifetimeExternallyManaged, RequiresGCollection);
+ }
+
+ bool isLifetimeExternallyManaged() const {
+ return LifetimeFlag;
+ }
+ void setLifetimeExternallyManaged(bool Managed = true) {
+ LifetimeFlag = Managed;
+ }
+
+ bool isVolatile() const {
+ return VolatileFlag;
+ }
+
+ bool requiresGCollection() const {
+ return RequiresGCollection;
+ }
+
+ llvm::Value *getAddr() const {
+ return Addr;
+ }
+
+ bool isIgnored() const {
+ return Addr == 0;
+ }
+
+ RValue asRValue() const {
+ return RValue::getAggregate(getAddr(), isVolatile());
+ }
+
+ void setZeroed(bool V = true) { IsZeroed = V; }
+ bool isZeroed() const {
+ return IsZeroed;
}
};
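
AggValueSlot bundles what used to travel as separate (AggLoc, IsAggLocVolatile, IsInitializer) parameters, as seen in the EmitExprWithCleanups change earlier in this patch. A simplified sketch of typical call sites (a hypothetical helper, not verbatim from the tree):

    // Evaluate an aggregate expression into an existing lvalue; the caller
    // manages the object's lifetime, so no destructor cleanup is pushed.
    void emitAggInit(CodeGenFunction &CGF, const Expr *E, LValue LV) {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV, /*LifetimeExternallyManaged=*/true);
      CGF.EmitAggExpr(E, Slot);
    }

    // Evaluate purely for side effects; the aggregate result is discarded.
    void emitAggIgnored(CodeGenFunction &CGF, const Expr *E) {
      CGF.EmitAggExpr(E, AggValueSlot::ignored());
    }
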
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index b5a23291f8a2..8c20f29903aa 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -1,4 +1,11 @@
-set(LLVM_NO_RTTI 1)
+set(LLVM_LINK_COMPONENTS
+ asmparser
+ bitreader
+ bitwriter
+ ipo
+ )
+
+set(LLVM_USED_LIBS clangBasic clangAST clangFrontend)
add_clang_library(clangCodeGen
BackendUtil.cpp
@@ -7,6 +14,8 @@ add_clang_library(clangCodeGen
CGCall.cpp
CGClass.cpp
CGCXX.cpp
+ CGCXXABI.cpp
+ CGCleanup.cpp
CGDebugInfo.cpp
CGDecl.cpp
CGDeclCXX.cpp
@@ -29,9 +38,9 @@ add_clang_library(clangCodeGen
CodeGenAction.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
+ CodeGenTBAA.cpp
CodeGenTypes.cpp
ItaniumCXXABI.cpp
- Mangle.cpp
MicrosoftCXXABI.cpp
ModuleBuilder.cpp
TargetInfo.cpp
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 51c55a1a8390..a24bbc480c19 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -28,7 +28,7 @@
using namespace clang;
using namespace llvm;
-namespace {
+namespace clang {
class BackendConsumer : public ASTConsumer {
Diagnostic &Diags;
BackendAction Action;
@@ -121,10 +121,10 @@ namespace {
// Install an inline asm handler so that diagnostics get printed through
// our diagnostics hooks.
LLVMContext &Ctx = TheModule->getContext();
- void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
+ LLVMContext::InlineAsmDiagHandlerTy OldHandler =
+ Ctx.getInlineAsmDiagnosticHandler();
void *OldContext = Ctx.getInlineAsmDiagnosticContext();
- Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
- this);
+ Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
TheModule.get(), Action, AsmOutStream);
@@ -209,8 +209,7 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
// issue as being an error in the source with a note showing the instantiated
// code.
if (LocCookie.isValid()) {
- Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
- diag::err_fe_inline_asm).AddString(Message);
+ Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
if (D.getLoc().isValid())
Diags.Report(Loc, diag::note_fe_inline_asm_here);
@@ -225,9 +224,15 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
//
-CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
+ : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
-CodeGenAction::~CodeGenAction() {}
+CodeGenAction::~CodeGenAction() {
+ TheModule.reset();
+ if (OwnsVMContext)
+ delete VMContext;
+}
bool CodeGenAction::hasIRSupport() const { return true; }
@@ -237,16 +242,18 @@ void CodeGenAction::EndSourceFileAction() {
return;
// Steal the module from the consumer.
- BackendConsumer *Consumer = static_cast<BackendConsumer*>(
- &getCompilerInstance().getASTConsumer());
-
- TheModule.reset(Consumer->takeModule());
+ TheModule.reset(BEConsumer->takeModule());
}
llvm::Module *CodeGenAction::takeModule() {
return TheModule.take();
}
+llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
+ OwnsVMContext = false;
+ return VMContext;
+}
+
static raw_ostream *GetOutputStream(CompilerInstance &CI,
llvm::StringRef InFile,
BackendAction Action) {
@@ -275,10 +282,12 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
if (BA != Backend_EmitNothing && !OS)
return 0;
- return new BackendConsumer(BA, CI.getDiagnostics(),
- CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
- CI.getLLVMContext());
+ BEConsumer =
+ new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ *VMContext);
+ return BEConsumer;
}
void CodeGenAction::ExecuteAction() {
@@ -303,7 +312,7 @@ void CodeGenAction::ExecuteAction() {
getCurrentFile().c_str());
llvm::SMDiagnostic Err;
- TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext()));
+ TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
if (!TheModule) {
// Translate from the diagnostic info to the SourceManager location.
SourceLocation Loc = SM.getLocation(
@@ -318,7 +327,7 @@ void CodeGenAction::ExecuteAction() {
unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
Msg);
- CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID);
+ CI.getDiagnostics().Report(Loc, DiagID);
return;
}
@@ -334,15 +343,20 @@ void CodeGenAction::ExecuteAction() {
//
-EmitAssemblyAction::EmitAssemblyAction()
- : CodeGenAction(Backend_EmitAssembly) {}
+EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitAssembly, _VMContext) {}
-EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitBC, _VMContext) {}
-EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitLL, _VMContext) {}
-EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitNothing, _VMContext) {}
-EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitMCNull, _VMContext) {}
-EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
+EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitObj, _VMContext) {}
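
CodeGenAction can now borrow a caller-owned LLVMContext instead of always creating its own, and takeLLVMContext lets an embedder reclaim ownership. A sketch of the embedding pattern this enables (error handling and CompilerInstance setup omitted):

    #include "clang/CodeGen/CodeGenAction.h"
    #include "clang/Frontend/CompilerInstance.h"
    #include "llvm/LLVMContext.h"

    llvm::Module *compileToModule(clang::CompilerInstance &CI,
                                  llvm::LLVMContext &Ctx) {
      // Pass our context in; the action will not own or delete it.
      clang::EmitLLVMOnlyAction Act(&Ctx);
      if (!CI.ExecuteAction(Act))
        return 0;
      return Act.takeModule();  // module lives in Ctx, which we still own
    }
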
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 51d084e1d301..f1b72863caca 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -29,25 +29,17 @@ using namespace clang;
using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
- : BlockFunction(cgm, *this, Builder), CGM(cgm),
- Target(CGM.getContext().Target),
- Builder(cgm.getModule().getContext()),
+ : CodeGenTypeCache(cgm), CGM(cgm),
+ Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
+ BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0),
DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
TrapBB(0) {
-
- // Get some frequently used types.
- LLVMPointerWidth = Target.getPointerWidth(0);
- llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
- IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
- Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
- Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
-
- Exceptions = getContext().getLangOptions().Exceptions;
+
CatchUndefined = getContext().getLangOptions().CatchUndefined;
CGM.getCXXABI().getMangleContext().startNewFunction();
}
@@ -125,7 +117,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// Emit function epilog (to return).
EmitReturnBlock();
- EmitFunctionInstrumentation("__cyg_profile_func_exit");
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -184,20 +177,16 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
- if (!ShouldInstrumentFunction())
- return;
-
const llvm::PointerType *PointerTy;
const llvm::FunctionType *FunctionTy;
std::vector<const llvm::Type*> ProfileFuncArgs;
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
- PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+ PointerTy = Int8PtrTy;
ProfileFuncArgs.push_back(PointerTy);
ProfileFuncArgs.push_back(PointerTy);
- FunctionTy = llvm::FunctionType::get(
- llvm::Type::getVoidTy(VMContext),
- ProfileFuncArgs, false);
+ FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
@@ -210,6 +199,15 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
CallSite);
}
+void CodeGenFunction::EmitMCountInstrumentation() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);
+
+ llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
+ Target.getMCountName());
+ Builder.CreateCall(MCountFn);
+}
+
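
EmitMCountInstrumentation implements -pg-style profiling (CodeGenOpts.InstrumentForProfiling, wired up in StartFunction below): every function entry calls the target's mcount routine, whose name comes from Target.getMCountName(). Conceptually (this is an illustration, not the actual emitted IR):

    extern "C" void mcount(void);  // real symbol name is target-specific,
                                   // e.g. "mcount", "_mcount", or ".mcount"

    int add(int a, int b) {
      mcount();   // what the instrumentation inserts at function entry
      return a + b;
    }
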
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
const FunctionArgList &Args,
@@ -232,6 +230,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
break;
}
+ if (getContext().getLangOptions().OpenCL) {
+ // Add metadata for a kernel function.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ llvm::LLVMContext &Context = getLLVMContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+
+ llvm::Value *Op = Fn;
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, &Op, 1));
+ }
+ }
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -246,18 +257,23 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Builder.SetInsertPoint(EntryBB);
- QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
- false, false, 0, 0,
- /*FIXME?*/
- FunctionType::ExtInfo());
-
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
+ // FIXME: what is going on here and why does it ignore all these
+ // interesting type properties?
+ QualType FnType =
+ getContext().getFunctionType(RetTy, 0, 0,
+ FunctionProtoType::ExtProtoInfo());
+
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
- EmitFunctionInstrumentation("__cyg_profile_func_enter");
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ if (CGM.getCodeGenOpts().InstrumentForProfiling)
+ EmitMCountInstrumentation();
// FIXME: Leaked.
// CC info is ignored, hopefully?
@@ -384,8 +400,7 @@ bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
IgnoreCaseStmts = true;
// Scan subexpressions for verboten labels.
- for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
- I != E; ++I)
+ for (Stmt::const_child_range I = S->children(); I; ++I)
if (ContainsLabel(*I, IgnoreCaseStmts))
return true;
@@ -442,13 +457,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Emit the LHS as a conditional. If the LHS conditional is false, we
// want to jump to the FalseBlock.
llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+
+ ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
EmitBlock(LHSTrue);
// Any temporaries created here are conditional.
- BeginConditionalBranch();
+ eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
- EndConditionalBranch();
+ eval.end(*this);
return;
} else if (CondBOp->getOpcode() == BO_LOr) {
@@ -469,13 +486,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Emit the LHS as a conditional. If the LHS conditional is true, we
// want to jump to the TrueBlock.
llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+
+ ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
EmitBlock(LHSFalse);
// Any temporaries created here are conditional.
- BeginConditionalBranch();
+ eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
- EndConditionalBranch();
+ eval.end(*this);
return;
}
@@ -495,11 +514,20 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+ ConditionalEvaluation cond(*this);
EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+
+ cond.begin(*this);
EmitBlock(LHSBlock);
EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ cond.begin(*this);
EmitBlock(RHSBlock);
EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
return;
}
}
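
The new ConditionalEvaluation RAII object (replacing the Begin/EndConditionalBranch counter) brackets regions whose evaluation is control-dependent, so cleanups for temporaries created there can be made conditional. A source-level example of why that matters (illustrative only):

    struct Guard {
      Guard();
      ~Guard();
      operator bool() const;
    };

    bool g(bool cond) {
      // The RHS of '&&' is evaluated only when cond is true, so Guard's
      // destructor may run only on that path; the eval.begin()/eval.end()
      // bracketing above is what tracks this.
      return cond && Guard();
    }
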
@@ -516,6 +544,57 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
CGM.ErrorUnsupported(S, Type, OmitOnError);
}
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param baseType - the element type of the VLA
+/// \param dest - a pointer to the first element of the VLA
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *sizeInChars) {
+ std::pair<CharUnits,CharUnits> baseSizeAndAlign
+ = CGF.getContext().getTypeInfoInChars(baseType);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *baseSizeInChars
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+
+ const llvm::Type *i8p = Builder.getInt8PtrTy();
+
+ llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
+ llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+
+ llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+ // Make a loop over the VLA. C99 guarantees that the VLA element
+ // count must be nonzero.
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = Builder.CreatePHI(i8p, "vla.cur");
+ cur->reserveOperandSpace(2);
+ cur->addIncoming(begin, originBB);
+
+ // memcpy the individual element bit-pattern.
+ Builder.CreateMemCpy(cur, src, baseSizeInChars,
+ baseSizeAndAlign.second.getQuantity(),
+ /*volatile*/ false);
+
+ // Go to the next element.
+ llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");
+
+ // Leave if that's the end of the VLA.
+ llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+ Builder.CreateCondBr(done, contBB, loopBB);
+ cur->addIncoming(next, loopBB);
+
+ CGF.EmitBlock(contBB);
+}
+
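
This loop exists because a type's "zero" value is not always an all-zero-bits pattern; the canonical example is a pointer to data member, which the Itanium ABI null-initializes to -1. An illustrative type only; which constructs actually reach this path depends on the caller in EmitNullInitialization below:

    struct S { int a, b; };
    typedef int S::*MemPtr;   // Itanium ABI: the null MemPtr is -1, not 0

    // isZeroInitializable(MemPtr) is false, so null-initializing an array
    // of MemPtr must splat the -1 pattern into every element; for a VLA the
    // trip count is only known at run time, hence the memcpy loop above.
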
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
@@ -529,26 +608,42 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Cast the dest ptr to the appropriate i8 pointer type.
unsigned DestAS =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::Type *BP =
- llvm::Type::getInt8PtrTy(VMContext, DestAS);
+ const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
- uint64_t Size = TypeInfo.first;
- unsigned Align = TypeInfo.second;
+ uint64_t Size = TypeInfo.first / 8;
+ unsigned Align = TypeInfo.second / 8;
- // Don't bother emitting a zero-byte memset.
- if (Size == 0)
- return;
+ llvm::Value *SizeVal;
+ const VariableArrayType *vla;
- llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
- llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);
+ // Don't bother emitting a zero-byte memset.
+ if (Size == 0) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (const VariableArrayType *vlaType =
+ dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ SizeVal = GetVLASize(vlaType);
+ vla = vlaType;
+ } else {
+ return;
+ }
+ } else {
+ SizeVal = llvm::ConstantInt::get(IntPtrTy, Size);
+ vla = 0;
+ }
// If the type contains a pointer to data member we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ // For a VLA, emit a single element, then splat that over the VLA.
+ if (vla) Ty = getContext().getBaseElementType(vla);
+
llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
llvm::GlobalVariable *NullVariable =
@@ -559,27 +654,20 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
llvm::Value *SrcPtr =
Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
- // FIXME: variable-size types?
+ if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
- llvm::Constant *Memcpy =
- CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
- Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
- /*volatile*/ Builder.getFalse());
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align, false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
-
- // FIXME: Handle variable sized types.
- Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
- Builder.getInt8(0),
- SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, Align, false);
}
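
Summarizing the paths through EmitNullInitialization as a sketch of the source constructs that reach them (the mapping is approximate):

    struct HasMemPtr { int x; int HasMemPtr::*p; };

    void h() {
      int plain[16] = {};        // zero-initializable: a single memset of 0
      HasMemPtr s = HasMemPtr(); // not zero-initializable: memcpy from a
                                 // global "null" constant (p becomes -1)
      // A variable-length array takes the same two routes, but with a size
      // from GetVLASize and, for non-zero patterns, the splat loop above.
    }
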
-llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
// Make sure that there is a block for the indirect goto.
if (IndirectBranch == 0)
GetIndirectGotoBlock();
@@ -597,8 +685,6 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
// Create the PHI node that indirect gotos will add entries to.
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
@@ -621,6 +707,9 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
EnsureInsertPoint();
if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+    // An unknown size indication requires no size computation.
+ if (!VAT->getSizeExpr())
+ return 0;
llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
if (!SizeEntry) {
@@ -649,6 +738,11 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
return 0;
}
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ EmitVLASize(PT->getInnerType());
+ return 0;
+ }
+
const PointerType *PT = Ty->getAs<PointerType>();
assert(PT && "unknown VM type!");
EmitVLASize(PT->getPointeeType());
@@ -656,686 +750,41 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
}
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
- if (CGM.getContext().getBuiltinVaListType()->isArrayType())
+ if (getContext().getBuiltinVaListType()->isArrayType())
return EmitScalarExpr(E);
return EmitLValue(E).getAddress();
}
-/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
- assert(Old.isValid());
-
- while (EHStack.stable_begin() != Old) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
-
- // As long as Old strictly encloses the scope's enclosing normal
- // cleanup, we're going to emit another normal cleanup which
- // fallthrough can propagate through.
- bool FallThroughIsBranchThrough =
- Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
-
- PopCleanupBlock(FallThroughIsBranchThrough);
- }
-}
-
-static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
- EHCleanupScope &Scope) {
- assert(Scope.isNormalCleanup());
- llvm::BasicBlock *Entry = Scope.getNormalBlock();
- if (!Entry) {
- Entry = CGF.createBasicBlock("cleanup");
- Scope.setNormalBlock(Entry);
- }
- return Entry;
-}
-
-static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
- EHCleanupScope &Scope) {
- assert(Scope.isEHCleanup());
- llvm::BasicBlock *Entry = Scope.getEHBlock();
- if (!Entry) {
- Entry = CGF.createBasicBlock("eh.cleanup");
- Scope.setEHBlock(Entry);
- }
- return Entry;
-}
-
-/// Transitions the terminator of the given exit-block of a cleanup to
-/// be a cleanup switch.
-static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
- llvm::BasicBlock *Block) {
- // If it's a branch, turn it into a switch whose default
- // destination is its original target.
- llvm::TerminatorInst *Term = Block->getTerminator();
- assert(Term && "can't transition block without terminator");
-
- if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
- assert(Br->isUnconditional());
- llvm::LoadInst *Load =
- new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
- Br->eraseFromParent();
- return Switch;
- } else {
- return cast<llvm::SwitchInst>(Term);
- }
-}
-
-/// Attempts to reduce a cleanup's entry block to a fallthrough. This
-/// is basically llvm::MergeBlockIntoPredecessor, except
-/// simplified/optimized for the tighter constraints on cleanup blocks.
-///
-/// Returns the new block, whatever it is.
-static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
- llvm::BasicBlock *Entry) {
- llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
- if (!Pred) return Entry;
-
- llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
- if (!Br || Br->isConditional()) return Entry;
- assert(Br->getSuccessor(0) == Entry);
-
- // If we were previously inserting at the end of the cleanup entry
- // block, we'll need to continue inserting at the end of the
- // predecessor.
- bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
- assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
-
- // Kill the branch.
- Br->eraseFromParent();
-
- // Merge the blocks.
- Pred->getInstList().splice(Pred->end(), Entry->getInstList());
-
- // Kill the entry block.
- Entry->eraseFromParent();
-
- if (WasInsertBlock)
- CGF.Builder.SetInsertPoint(Pred);
-
- return Pred;
-}
-
-static void EmitCleanup(CodeGenFunction &CGF,
- EHScopeStack::Cleanup *Fn,
- bool ForEH) {
- if (ForEH) CGF.EHStack.pushTerminate();
- Fn->Emit(CGF, ForEH);
- if (ForEH) CGF.EHStack.popTerminate();
- assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
-}
-
-/// Pops a cleanup block. If the block includes a normal cleanup, the
-/// current insertion point is threaded through the cleanup, as are
-/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
- assert(!EHStack.empty() && "cleanup stack is empty!");
- assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
- assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
- assert(Scope.isActive() && "cleanup was still inactive when popped!");
-
- // Check whether we need an EH cleanup. This is only true if we've
- // generated a lazy EH cleanup block.
- bool RequiresEHCleanup = Scope.hasEHBranches();
-
- // Check the three conditions which might require a normal cleanup:
-
- // - whether there are branch fix-ups through this cleanup
- unsigned FixupDepth = Scope.getFixupDepth();
- bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
-
- // - whether there are branch-throughs or branch-afters
- bool HasExistingBranches = Scope.hasBranches();
-
- // - whether there's a fallthrough
- llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
- bool HasFallthrough = (FallthroughSource != 0);
-
- bool RequiresNormalCleanup = false;
- if (Scope.isNormalCleanup() &&
- (HasFixups || HasExistingBranches || HasFallthrough)) {
- RequiresNormalCleanup = true;
- }
-
- // If we don't need the cleanup at all, we're done.
- if (!RequiresNormalCleanup && !RequiresEHCleanup) {
- EHStack.popCleanup(); // safe because there are no fixups
- assert(EHStack.getNumBranchFixups() == 0 ||
- EHStack.hasNormalCleanups());
- return;
- }
-
- // Copy the cleanup emission data out. Note that SmallVector
- // guarantees maximal alignment for its buffer regardless of its
- // type parameter.
- llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
- CleanupBuffer.reserve(Scope.getCleanupSize());
- memcpy(CleanupBuffer.data(),
- Scope.getCleanupBuffer(), Scope.getCleanupSize());
- CleanupBuffer.set_size(Scope.getCleanupSize());
- EHScopeStack::Cleanup *Fn =
- reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
-
- // We want to emit the EH cleanup after the normal cleanup, but go
- // ahead and do the setup for the EH cleanup while the scope is still
- // alive.
- llvm::BasicBlock *EHEntry = 0;
- llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
- if (RequiresEHCleanup) {
- EHEntry = CreateEHEntry(*this, Scope);
-
- // Figure out the branch-through dest if necessary.
- llvm::BasicBlock *EHBranchThroughDest = 0;
- if (Scope.hasEHBranchThroughs()) {
- assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
- EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
- EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
- }
-
- // If we have exactly one branch-after and no branch-throughs, we
- // can dispatch it without a switch.
- if (!Scope.hasEHBranchThroughs() &&
- Scope.getNumEHBranchAfters() == 1) {
- assert(!EHBranchThroughDest);
-
- // TODO: remove the spurious eh.cleanup.dest stores if this edge
- // never went through any switches.
- llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
-
- // Otherwise, if we have any branch-afters, we need a switch.
- } else if (Scope.getNumEHBranchAfters()) {
- // The default of the switch belongs to the branch-throughs if
- // they exist.
- llvm::BasicBlock *Default =
- (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
-
- const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- EHInstsToAppend.push_back(Load);
- EHInstsToAppend.push_back(Switch);
-
- for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
- Switch->addCase(Scope.getEHBranchAfterIndex(I),
- Scope.getEHBranchAfterBlock(I));
-
- // Otherwise, we have only branch-throughs; jump to the next EH
- // cleanup.
- } else {
- assert(EHBranchThroughDest);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
- }
- }
-
- if (!RequiresNormalCleanup) {
- EHStack.popCleanup();
- } else {
- // As a kind of crazy internal case, branch-through fall-throughs
- // leave the insertion point set to the end of the last cleanup.
- bool HasPrebranchedFallthrough =
- (HasFallthrough && FallthroughSource->getTerminator());
- assert(!HasPrebranchedFallthrough ||
- FallthroughSource->getTerminator()->getSuccessor(0)
- == Scope.getNormalBlock());
-
- // If we have a fallthrough and no other need for the cleanup,
- // emit it directly.
- if (HasFallthrough && !HasPrebranchedFallthrough &&
- !HasFixups && !HasExistingBranches) {
-
- // Fixups can cause us to optimistically create a normal block,
- // only to later have no real uses for it. Just delete it in
- // this case.
- // TODO: we can potentially simplify all the uses after this.
- if (Scope.getNormalBlock()) {
- Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
- delete Scope.getNormalBlock();
- }
-
- EHStack.popCleanup();
-
- EmitCleanup(*this, Fn, /*ForEH*/ false);
-
- // Otherwise, the best approach is to thread everything through
- // the cleanup block and then try to clean up after ourselves.
- } else {
- // Force the entry block to exist.
- llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
-
- // If there's a fallthrough, we need to store the cleanup
- // destination index. For fall-throughs this is always zero.
- if (HasFallthrough && !HasPrebranchedFallthrough)
- Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
-
- // Emit the entry block. This implicitly branches to it if we
- // have fallthrough. All the fixups and existing branches should
- // already be branched to it.
- EmitBlock(NormalEntry);
-
- bool HasEnclosingCleanups =
- (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
-
- // Compute the branch-through dest if we need it:
- // - if there are branch-throughs threaded through the scope
- // - if fall-through is a branch-through
- // - if there are fixups that will be optimistically forwarded
- // to the enclosing cleanup
- llvm::BasicBlock *BranchThroughDest = 0;
- if (Scope.hasBranchThroughs() ||
- (HasFallthrough && FallthroughIsBranchThrough) ||
- (HasFixups && HasEnclosingCleanups)) {
- assert(HasEnclosingCleanups);
- EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
- BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
- }
-
- llvm::BasicBlock *FallthroughDest = 0;
- llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
-
- // If there's exactly one branch-after and no other threads,
- // we can route it without a switch.
- if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
- Scope.getNumBranchAfters() == 1) {
- assert(!BranchThroughDest);
-
- // TODO: clean up the possibly dead stores to the cleanup dest slot.
- llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
- InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
-
- // Build a switch-out if we need it:
- // - if there are branch-afters threaded through the scope
- // - if fall-through is a branch-after
- // - if there are fixups that have nowhere left to go and
- // so must be immediately resolved
- } else if (Scope.getNumBranchAfters() ||
- (HasFallthrough && !FallthroughIsBranchThrough) ||
- (HasFixups && !HasEnclosingCleanups)) {
-
- llvm::BasicBlock *Default =
- (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
-
- // TODO: base this on the number of branch-afters and fixups
- const unsigned SwitchCapacity = 10;
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- InstsToAppend.push_back(Load);
- InstsToAppend.push_back(Switch);
-
- // Branch-after fallthrough.
- if (HasFallthrough && !FallthroughIsBranchThrough) {
- FallthroughDest = createBasicBlock("cleanup.cont");
- Switch->addCase(Builder.getInt32(0), FallthroughDest);
- }
-
- for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
- Switch->addCase(Scope.getBranchAfterIndex(I),
- Scope.getBranchAfterBlock(I));
- }
-
- if (HasFixups && !HasEnclosingCleanups)
- ResolveAllBranchFixups(Switch);
- } else {
- // We should always have a branch-through destination in this case.
- assert(BranchThroughDest);
- InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
- }
-
- // We're finally ready to pop the cleanup.
- EHStack.popCleanup();
- assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
-
- EmitCleanup(*this, Fn, /*ForEH*/ false);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
- NormalExit->getInstList().push_back(InstsToAppend[I]);
-
- // Optimistically hope that any fixups will continue falling through.
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I) {
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (!Fixup.Destination) continue;
- if (!Fixup.OptimisticBranchBlock) {
- new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
- Fixup.InitialBranch->setSuccessor(0, NormalEntry);
- }
- Fixup.OptimisticBranchBlock = NormalExit;
- }
-
- if (FallthroughDest)
- EmitBlock(FallthroughDest);
- else if (!HasFallthrough)
- Builder.ClearInsertionPoint();
-
- // Check whether we can merge NormalEntry into a single predecessor.
- // This might invalidate (non-IR) pointers to NormalEntry.
- llvm::BasicBlock *NewNormalEntry =
- SimplifyCleanupEntry(*this, NormalEntry);
-
- // If it did invalidate those pointers, and NormalEntry was the same
- // as NormalExit, go back and patch up the fixups.
- if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I)
- EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
- }
- }
-
- assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
-
- // Emit the EH cleanup if required.
- if (RequiresEHCleanup) {
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-
- EmitBlock(EHEntry);
- EmitCleanup(*this, Fn, /*ForEH*/ true);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
- EHExit->getInstList().push_back(EHInstsToAppend[I]);
-
- Builder.restoreIP(SavedIP);
-
- SimplifyCleanupEntry(*this, EHEntry);
- }
-}
-
-/// Terminate the current block by emitting a branch which might leave
-/// the current cleanup-protected scope. The target scope may not yet
-/// be known, in which case this will require a fixup.
-///
-/// As a side-effect, this method clears the insertion point.
-void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
- assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
- && "stale jump destination");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active normal cleanup.
- EHScopeStack::stable_iterator
- TopCleanup = EHStack.getInnermostActiveNormalCleanup();
-
- // If we're not in an active normal cleanup scope, or if the
- // destination scope is within the innermost active normal cleanup
- // scope, we don't need to worry about fixups.
- if (TopCleanup == EHStack.stable_end() ||
- TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
- Builder.ClearInsertionPoint();
- return;
- }
-
- // If we can't resolve the destination cleanup scope, just add this
- // to the current cleanup scope as a branch fixup.
- if (!Dest.getScopeDepth().isValid()) {
- BranchFixup &Fixup = EHStack.addBranchFixup();
- Fixup.Destination = Dest.getBlock();
- Fixup.DestinationIndex = Dest.getDestIndex();
- Fixup.InitialBranch = BI;
- Fixup.OptimisticBranchBlock = 0;
-
- Builder.ClearInsertionPoint();
- return;
- }
-
- // Otherwise, thread through all the normal cleanups in scope.
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(TopCleanup));
- BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- EHScopeStack::stable_iterator I = TopCleanup;
- EHScopeStack::stable_iterator E = Dest.getScopeDepth();
- if (E.strictlyEncloses(I)) {
- while (true) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isNormalCleanup());
- I = Scope.getEnclosingNormalCleanup();
-
- // If this is the last cleanup we're propagating through, tell it
- // that there's a resolved jump moving through it.
- if (!E.strictlyEncloses(I)) {
- Scope.addBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, tell the scope that there's a jump propagating
- // through it. If this isn't new information, all the rest of
- // the work has been done before.
- if (!Scope.addBranchThrough(Dest.getBlock()))
- break;
- }
- }
-
- Builder.ClearInsertionPoint();
-}
-
-void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
- // We should never get invalid scope depths for an UnwindDest; that
- // implies that the destination wasn't set up correctly.
- assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active cleanup.
- EHScopeStack::stable_iterator
- InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
-
- // If the destination is in the same EH cleanup scope as us, we
- // don't need to thread through anything.
- if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
- Builder.ClearInsertionPoint();
- return;
- }
- assert(InnermostCleanup != EHStack.stable_end());
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
- BI->setSuccessor(0, CreateEHEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- for (EHScopeStack::stable_iterator
- I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
- assert(E.strictlyEncloses(I));
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isEHCleanup());
- I = Scope.getEnclosingEHCleanup();
-
- // If this is the last cleanup we're propagating through, add this
- // as a branch-after.
- if (I == E) {
- Scope.addEHBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, add it as a branch-through. If this isn't new
- // information, all the rest of the work has been done before.
- if (!Scope.addEHBranchThrough(Dest.getBlock()))
- break;
- }
-
- Builder.ClearInsertionPoint();
-}
-
-/// All the branch fixups on the EH stack have propagated out past the
-/// outermost normal cleanup; resolve them all by adding cases to the
-/// given switch instruction.
-void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
-
- for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination isn't set or if we've
- // already treated it.
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (Fixup.Destination == 0) continue;
- if (!CasesAdded.insert(Fixup.Destination)) continue;
-
- Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
- Fixup.Destination);
- }
-
- EHStack.clearFixups();
-}
-
-void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
- assert(Block && "resolving a null target block");
- if (!EHStack.getNumBranchFixups()) return;
-
- assert(EHStack.hasNormalCleanups() &&
- "branch fixups exist with no normal cleanups on stack");
-
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
- bool ResolvedAny = false;
-
- for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination doesn't match.
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (Fixup.Destination != Block) continue;
-
- Fixup.Destination = 0;
- ResolvedAny = true;
-
- // If it doesn't have an optimistic branch block, InitialBranch is
- // already pointing to the right place.
- llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
- if (!BranchBB)
- continue;
-
- // Don't process the same optimistic branch block twice.
- if (!ModifiedOptimisticBlocks.insert(BranchBB))
- continue;
-
- llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
-
- // Add a case to the switch.
- Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
- }
-
- if (ResolvedAny)
- EHStack.popNullFixups();
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::Constant *Init) {
+ assert(Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
-/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
- assert(C != EHStack.stable_end() && "activating bottom of stack?");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
- assert(!Scope.isActive() && "double activation");
-
- // Calculate whether the cleanup was used:
- bool Used = false;
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+ // At the moment, the only aggressive peephole we do in IR gen
+ // is trunc(zext) folding, but if we add more, we can easily
+ // extend this protection.
- // - as a normal cleanup
- if (Scope.isNormalCleanup()) {
- bool NormalUsed = false;
- if (Scope.getNormalBlock()) {
- NormalUsed = true;
- } else {
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostNormalCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getNormalBlock()) {
- NormalUsed = true;
- break;
- }
- I = S.getEnclosingNormalCleanup();
- }
- }
+ if (!rvalue.isScalar()) return PeepholeProtection();
+ llvm::Value *value = rvalue.getScalarVal();
+ if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
- if (NormalUsed)
- Used = true;
- else
- Scope.setActivatedBeforeNormalUse(true);
- }
+ // Just make an extra bitcast.
+ assert(HaveInsertPoint());
+ llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+ Builder.GetInsertBlock());
- // - as an EH cleanup
- if (Scope.isEHCleanup()) {
- bool EHUsed = false;
- if (Scope.getEHBlock()) {
- EHUsed = true;
- } else {
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostEHCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getEHBlock()) {
- EHUsed = true;
- break;
- }
- I = S.getEnclosingEHCleanup();
- }
- }
-
- if (EHUsed)
- Used = true;
- else
- Scope.setActivatedBeforeEHUse(true);
- }
-
- llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
- if (Used) {
- Var = CreateTempAlloca(Builder.getInt1Ty());
- InitTempAlloca(Var, Builder.getFalse());
- }
- Scope.setActiveVar(Var);
+ PeepholeProtection protection;
+ protection.Inst = inst;
+ return protection;
}
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
- if (!NormalCleanupDest)
- NormalCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return NormalCleanupDest;
-}
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+ if (!protection.Inst) return;
-llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
- if (!EHCleanupDest)
- EHCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
- return EHCleanupDest;
-}
-
-void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
- llvm::ConstantInt *Init) {
- assert (Init && "Invalid DeclRefExpr initializer!");
- if (CGDebugInfo *Dbg = getDebugInfo())
- Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
+ // In theory, we could try to duplicate the peepholes now, but whatever.
+ protection.Inst->eraseFromParent();
}
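For orientation, a sketch of the defensive no-op this pair brackets around a parked scalar; names are illustrative:

    // protect: give the ZExtInst a second use so the trunc(zext x)
    // peephole cannot erase it while the rvalue sits in OpaqueRValues.
    llvm::Instruction *guard =
        new llvm::BitCastInst(value, value->getType(), "",
                              Builder.GetInsertBlock());
    // ... the rvalue is consumed, e.g. through getOpaqueRValueMapping ...
    guard->eraseFromParent();   // unprotect: drop the defensive use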
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 4f0420536ad2..67ef41448e8d 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -18,15 +18,14 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
+#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
#include "CodeGenModule.h"
-#include "CGBlocks.h"
#include "CGBuilder.h"
#include "CGCall.h"
-#include "CGCXX.h"
#include "CGValue.h"
namespace llvm {
@@ -46,6 +45,7 @@ namespace clang {
class CXXDestructorDecl;
class CXXTryStmt;
class Decl;
+ class LabelDecl;
class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
@@ -71,6 +71,8 @@ namespace CodeGen {
class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
+ class BlockFlags;
+ class BlockFieldFlags;
/// A branch fixup. These are required when emitting a goto to a
/// label which hasn't been emitted yet. The goto is optimistically
@@ -97,6 +99,28 @@ struct BranchFixup {
llvm::BranchInst *InitialBranch;
};
+template <class T> struct InvariantValue {
+ typedef T type;
+ typedef T saved_type;
+ static bool needsSaving(type value) { return false; }
+ static saved_type save(CodeGenFunction &CGF, type value) { return value; }
+ static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+ llvm::is_base_of<llvm::Value, T>::value &&
+ !llvm::is_base_of<llvm::Constant, T>::value &&
+ !llvm::is_base_of<llvm::BasicBlock, T>::value>
+struct DominatingPointer;
+template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
+// template <class T> struct DominatingPointer<T,true> at end of file
+
+template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
+
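A hedged sketch of what the instruction-saving specialization (the DominatingPointer<T,true> deferred to the end of the file) typically does: spill the value to a stack slot in the dominating block and reload it at the point of use. Constants and basic blocks always dominate, hence the is_base_of filters above.

    // Illustrative only; the real trait also has a needsSaving() fast path.
    struct DominatingLLVMValueSketch {
      typedef llvm::Value *saved_type;                    // the spill slot
      static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
        llvm::Value *slot =
            CGF.CreateTempAlloca(value->getType(), "cond.spill");
        CGF.Builder.CreateStore(value, slot);
        return slot;
      }
      static llvm::Value *restore(CodeGenFunction &CGF, saved_type slot) {
        return CGF.Builder.CreateLoad(slot);
      }
    };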
enum CleanupKind {
EHCleanup = 0x1,
NormalCleanup = 0x2,
@@ -175,6 +199,63 @@ public:
virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
};
+ /// UnconditionalCleanupN stores its N parameters and just passes
+ /// them to the real cleanup function.
+ template <class T, class A0>
+ class UnconditionalCleanup1 : public Cleanup {
+ A0 a0;
+ public:
+ UnconditionalCleanup1(A0 a0) : a0(a0) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ T::Emit(CGF, IsForEHCleanup, a0);
+ }
+ };
+
+ template <class T, class A0, class A1>
+ class UnconditionalCleanup2 : public Cleanup {
+ A0 a0; A1 a1;
+ public:
+ UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ T::Emit(CGF, IsForEHCleanup, a0, a1);
+ }
+ };
+
+ /// ConditionalCleanupN stores the saved form of its N parameters,
+ /// then restores them and performs the cleanup.
+ template <class T, class A0>
+ class ConditionalCleanup1 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ A0_saved a0_saved;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ T::Emit(CGF, IsForEHCleanup, a0);
+ }
+
+ public:
+ ConditionalCleanup1(A0_saved a0)
+ : a0_saved(a0) {}
+ };
+
+ template <class T, class A0, class A1>
+ class ConditionalCleanup2 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ T::Emit(CGF, IsForEHCleanup, a0, a1);
+ }
+
+ public:
+ ConditionalCleanup2(A0_saved a0, A1_saved a1)
+ : a0_saved(a0), a1_saved(a1) {}
+ };
+
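A hedged example of the contract these adapters assume: the cleanup type exposes a static Emit taking the forwarded arguments (FreeOnExit is hypothetical):

    struct FreeOnExit {                       // hypothetical cleanup type
      static void Emit(CodeGenFunction &CGF, bool IsForEHCleanup,
                       llvm::Value *ptr) {
        // would emit a call to ::free(ptr) on both normal and EH paths
      }
    };
    // UnconditionalCleanup1<FreeOnExit, llvm::Value*> stores 'ptr' as-is;
    // ConditionalCleanup1 stores DominatingValue<llvm::Value*>::save(...)
    // and restores it when the cleanup finally runs.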
private:
// The implementation for this class is in CGException.h and
// CGException.cpp; the definition is here because it's used as a
@@ -285,6 +366,25 @@ public:
(void) Obj;
}
+ // Feel free to add more variants of the following:
+
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
+ template <class T, class A0, class A1, class A2>
+ T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+ return new (Buffer) T(N, a0, a1, a2);
+ }
+
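A hedged sketch of a cleanup with trailing storage, per the getExtraSize contract (every name below is hypothetical):

    struct ArrayCleanup : EHScopeStack::Cleanup {         // hypothetical
      static size_t getExtraSize(size_t N) {
        return N * sizeof(llvm::Value*);
      }
      size_t NumElts;
      ArrayCleanup(size_t N, QualType ElTy, llvm::Value **Ptrs, bool ForEH)
          : NumElts(N) {
        // copy N pointers into the extra bytes that pushCleanupWithExtra
        // allocated immediately after this object
      }
      void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) { /* ... */ }
    };
    // EHStack.pushCleanupWithExtra<ArrayCleanup, QualType, llvm::Value**,
    //                              bool>(NormalAndEHCleanup, N, ElTy,
    //                                    Ptrs, true);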
/// Pops a cleanup scope off the stack. This should only be called
/// by CodeGenFunction::PopCleanupBlock.
void popCleanup();
@@ -395,7 +495,7 @@ public:
void popNullFixups();
/// Clears the branch-fixups list. This should only be called by
- /// CodeGenFunction::ResolveAllBranchFixups.
+ /// ResolveAllBranchFixups.
void clearFixups() { BranchFixups.clear(); }
/// Gets the next EH destination index.
@@ -404,7 +504,7 @@ public:
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
-class CodeGenFunction : public BlockFunction {
+class CodeGenFunction : public CodeGenTypeCache {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
@@ -423,7 +523,7 @@ public:
llvm::BasicBlock *getBlock() const { return Block; }
EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
unsigned getDestIndex() const { return Index; }
-
+
private:
llvm::BasicBlock *Block;
EHScopeStack::stable_iterator ScopeDepth;
@@ -482,13 +582,11 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- // intptr_t, i32, i64
- const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
- uint32_t LLVMPointerWidth;
-
- bool Exceptions;
bool CatchUndefined;
-
+
+ const CodeGen::CGBlockInfo *BlockInfo;
+ llvm::Value *BlockPointer;
+
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -510,6 +608,15 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
+ template <class T>
+ typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+ return DominatingValue<T>::save(*this, value);
+ }
+
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
@@ -526,6 +633,45 @@ public:
llvm::Constant *RethrowFn);
void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
+ return EHStack.pushCleanup<CleanupType>(kind, a0);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+
+ typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
+ return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+
+ typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
+ initFullExprCleanup();
+ }
+
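At a call site, a sketch reusing the hypothetical FreeOnExit from above: outside a conditional this is a plain push, while inside one (say the RHS of 'b && g(p)') the argument is spilled first and initFullExprCleanup() guards the cleanup with an activity flag.

    // sketch; FreeOnExit is the hypothetical cleanup defined earlier
    pushFullExprCleanup<FreeOnExit, llvm::Value*>(NormalAndEHCleanup, ptr);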
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -542,7 +688,14 @@ public:
/// process all branch fixups.
void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
- void ActivateCleanup(EHScopeStack::stable_iterator Cleanup);
+ /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+ /// The block cannot be reactivated. Pops it if it's the top of the
+ /// stack.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+
+ /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+ /// Cannot be used to resurrect a deactivated cleanup.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
@@ -557,11 +710,12 @@ public:
public:
/// \brief Enter a new cleanup scope.
- explicit RunCleanupsScope(CodeGenFunction &CGF)
- : CGF(CGF), PerformCleanup(true)
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : CGF(CGF), PerformCleanup(true)
{
CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
+ CGF.DidCallStackSave = false;
}
/// \brief Exit this cleanup scope, emitting any accumulated
@@ -593,7 +747,6 @@ public:
/// the cleanup blocks that have been added.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
- void ResolveAllBranchFixups(llvm::SwitchInst *Switch);
void ResolveBranchFixups(llvm::BasicBlock *Target);
/// The given basic block lies in the current EH scope, but may be a
@@ -608,7 +761,7 @@ public:
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
- JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
return getJumpDestInCurrentScope(createBasicBlock(Name));
}
@@ -626,27 +779,169 @@ public:
/// destination.
UnwindDest getRethrowDest();
- /// BeginConditionalBranch - Should be called before a conditional part of an
- /// expression is emitted. For example, before the RHS of the expression below
- /// is emitted:
- ///
- /// b && f(T());
- ///
- /// This is used to make sure that any temporaries created in the conditional
- /// branch are only destroyed if the branch is taken.
- void BeginConditionalBranch() {
- ++ConditionalBranchLevel;
- }
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ llvm::BasicBlock *StartBB;
- /// EndConditionalBranch - Should be called after a conditional part of an
- /// expression has been emitted.
- void EndConditionalBranch() {
- assert(ConditionalBranchLevel != 0 &&
- "Conditional branch mismatch!");
-
- --ConditionalBranchLevel;
- }
+ public:
+ ConditionalEvaluation(CodeGenFunction &CGF)
+ : StartBB(CGF.Builder.GetInsertBlock()) {}
+
+ void begin(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != this);
+ if (!CGF.OutermostConditional)
+ CGF.OutermostConditional = this;
+ }
+
+ void end(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != 0);
+ if (CGF.OutermostConditional == this)
+ CGF.OutermostConditional = 0;
+ }
+
+ /// Returns a block which will be executed prior to each
+ /// evaluation of the conditional code.
+ llvm::BasicBlock *getStartingBlock() const {
+ return StartBB;
+ }
+ };
+
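A sketch of the protocol around a short-circuited operand such as the RHS of 'a && b' (surrounding emission elided):

    ConditionalEvaluation eval(*this);  // records the dominating block
    // ... emit 'a' and the branch on its value ...
    eval.begin(*this);                  // cleanups pushed now are conditional
    // ... emit 'b' ...
    eval.end(*this);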
+ /// isInConditionalBranch - Return true if we're currently emitting
+ /// one branch or the other of a conditional expression.
+ bool isInConditionalBranch() const { return OutermostConditional != 0; }
+
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CodeGenFunction &CGF;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *SavedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CodeGenFunction &CGF)
+ : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
+ CGF.OutermostConditional = 0;
+ }
+
+ ~StmtExprEvaluation() {
+ CGF.OutermostConditional = SavedOutermostConditional;
+ CGF.EnsureInsertPoint();
+ }
+ };
+
+ /// An object which temporarily prevents a value from being
+ /// destroyed by aggressive peephole optimizations that assume that
+ /// all uses of a value have been realized in the IR.
+ class PeepholeProtection {
+ llvm::Instruction *Inst;
+ friend class CodeGenFunction;
+
+ public:
+ PeepholeProtection() : Inst(0) {}
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ const OpaqueValueExpr *OpaqueValue;
+ bool BoundLValue;
+ CodeGenFunction::PeepholeProtection Protection;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return expr->isGLValue() || expr->getType()->isRecordType();
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+ /// enough pattern that the convenience operator is really
+ /// helpful.
+ ///
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *op) : CGF(CGF) {
+ if (isa<ConditionalOperator>(op)) {
+ OpaqueValue = 0;
+ BoundLValue = false;
+ return;
+ }
+
+ const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
+ init(e->getOpaqueValue(), e->getCommon());
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
+ assert(opaqueValue && "no opaque value expression!");
+ assert(shouldBindAsLValue(opaqueValue));
+ initLValue(lvalue);
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
+ assert(opaqueValue && "no opaque value expression!");
+ assert(!shouldBindAsLValue(opaqueValue));
+ initRValue(rvalue);
+ }
+
+ void pop() {
+ assert(OpaqueValue && "mapping already popped!");
+ popImpl();
+ OpaqueValue = 0;
+ }
+
+ ~OpaqueValueMapping() {
+ if (OpaqueValue) popImpl();
+ }
+
+ private:
+ void popImpl() {
+ if (BoundLValue)
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+
+ void init(const OpaqueValueExpr *ov, const Expr *e) {
+ OpaqueValue = ov;
+ BoundLValue = shouldBindAsLValue(ov);
+ assert(BoundLValue == shouldBindAsLValue(e)
+ && "inconsistent expression value kinds!");
+ if (BoundLValue)
+ initLValue(CGF.EmitLValue(e));
+ else
+ initRValue(CGF.EmitAnyExpr(e));
+ }
+ void initLValue(const LValue &lv) {
+ CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
+ }
+
+ void initRValue(const RValue &rv) {
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ Protection = CGF.protectFromPeepholes(rv);
+ CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
+ }
+ };
+
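A sketch of the intended use for the GNU '?:' form 'x ?: y', where the common subexpression is evaluated once and rebound (E is assumed to be the AbstractConditionalOperator):

    OpaqueValueMapping binding(*this, E); // no-op for a plain ConditionalOperator
    // ... emit the branches; EmitLValue/EmitAnyExpr of the OpaqueValueExpr
    // consult OpaqueLValues/OpaqueRValues through this mapping ...
    // popped automatically by the destructor (or explicitly via pop())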
+ /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+ /// number that holds the value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ /// BuildBlockByrefAddress - Computes address location of the
+ /// variable which is declared as __block.
+ llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
@@ -658,10 +953,11 @@ private:
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
+ DeclMapTy LocalDeclMap;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
- llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
+ llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
@@ -682,6 +978,11 @@ private:
/// statement range in current switch instruction.
llvm::BasicBlock *CaseRangeBlock;
+ /// OpaqueLValues - Keeps track of the current set of opaque value
+ /// expressions.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
+
// VLASizeMap - This keeps track of the associated size for each VLA type.
// We track this by the size expression rather than the type itself because
// in certain situations, like a const qualifier applied to an VLA typedef,
@@ -708,21 +1009,17 @@ private:
/// VTT parameter.
ImplicitParamDecl *CXXVTTDecl;
llvm::Value *CXXVTTValue;
-
- /// ConditionalBranchLevel - Contains the nesting level of the current
- /// conditional branch. This is used so that we know if a temporary should be
- /// destroyed conditionally.
- unsigned ConditionalBranchLevel;
+
+ /// OutermostConditional - Points to the outermost active
+ /// conditional control. This is used so that we know if a
+ /// temporary should be destroyed conditionally.
+ ConditionalEvaluation *OutermostConditional;
/// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
unsigned> > ByRefValueInfo;
-
- /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
- /// number that holds the value.
- unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
@@ -735,6 +1032,8 @@ public:
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
+
/// Returns a pointer to the function's exception object slot, which
/// is assigned in every landing pad.
llvm::Value *getExceptionSlot();
@@ -755,7 +1054,7 @@ public:
return getInvokeDestImpl();
}
- llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
// Objective-C
@@ -769,6 +1068,10 @@ public:
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ void GenerateObjCGetterBody(ObjCIvarDecl *Ivar, bool IsAtomic, bool IsStrong);
+ void GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
+ ObjCIvarDecl *Ivar);
+
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
@@ -783,29 +1086,41 @@ public:
// Block Bits
//===--------------------------------------------------------------------===//
- llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
const llvm::StructType *,
- llvm::Constant *BlockVarLayout,
- std::vector<HelperInfo> *);
+ llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
- const BlockExpr *BExpr,
- CGBlockInfo &Info,
+ const CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- llvm::Constant *& BlockVarLayout,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm);
+ const DeclMapTy &ldm);
+
+ llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
+ BlockFieldFlags flags,
+ const VarDecl *BD);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ BlockFieldFlags flags,
+ const VarDecl *BD);
- llvm::Value *LoadBlockStruct();
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+
+ llvm::Value *LoadBlockStruct() {
+ assert(BlockPointer && "no block pointer set!");
+ return BlockPointer;
+ }
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
void AllocateBlockDecl(const BlockDeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
}
- llvm::Value *GetAddrOfBlockDecl(const ValueDecl *D, bool ByRef);
- const llvm::Type *BuildByRefType(const ValueDecl *D);
+ llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ const llvm::Type *BuildByRefType(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
void StartFunction(GlobalDecl GD, QualType RetTy,
@@ -827,21 +1142,21 @@ public:
/// GenerateThunk - Generate a thunk for the given method.
void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
-
+
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
- void InitializeVTablePointer(BaseSubobject Base,
+ void InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
uint64_t OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass);
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- void InitializeVTablePointers(BaseSubobject Base,
+ void InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
uint64_t OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
@@ -851,6 +1166,9 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+ /// GetVTablePtr - Return the Value of the vtable pointer member pointed
+ /// to by This.
+ llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
@@ -867,6 +1185,9 @@ public:
/// function instrumentation is enabled.
void EmitFunctionInstrumentation(const char *Fn);
+ /// EmitMCountInstrumentation - Emit call to .mcount.
+ void EmitMCountInstrumentation();
+
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
@@ -910,19 +1231,19 @@ public:
static bool hasAggregateLLVMType(QualType T);
/// createBasicBlock - Create an LLVM basic block.
- llvm::BasicBlock *createBasicBlock(const char *Name="",
- llvm::Function *Parent=0,
- llvm::BasicBlock *InsertBefore=0) {
+ llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
+ llvm::Function *parent = 0,
+ llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
- return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore);
+ return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
#else
- return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore);
+ return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
#endif
}
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
/// label maps to.
- JumpDest getJumpDestForLabel(const LabelStmt *S);
+ JumpDest getJumpDestForLabel(const LabelDecl *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
@@ -974,7 +1295,8 @@ public:
//===--------------------------------------------------------------------===//
LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
- return LValue::MakeAddr(V, T, Alignment, getContext());
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
@@ -997,19 +1319,31 @@ public:
/// appropriate alignment.
llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+ /// CreateAggTemp - Create a temporary memory object for the given
+ /// aggregate type.
+ AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
+ }
+
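A sketch of the usual pairing, an aggregate expression evaluated into a fresh temporary (EmitAggExpr taking the slot is assumed):

    AggValueSlot slot = CreateAggTemp(E->getType(), "agg.tmp");
    EmitAggExpr(E, slot);                  // fills the temporary
    llvm::Value *addr = slot.getAddr();    // address of the result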
+ /// Emit a cast to void* in the appropriate address space.
+ llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
+
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+ void EmitIgnoredExpr(const Expr *E);
+
/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
///
/// \param IgnoreResult - True if the resulting value isn't used.
- RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
- bool IsAggLocVolatile = false, bool IgnoreResult = false,
- bool IsInitializer = false);
+ RValue EmitAnyExpr(const Expr *E,
+ AggValueSlot AggSlot = AggValueSlot::ignored(),
+ bool IgnoreResult = false);
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
@@ -1017,14 +1351,13 @@ public:
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
- RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
- bool IsInitializer = false);
+ RValue EmitAnyExprToTemp(const Expr *E);
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
- bool IsLocationVolatile = false,
- bool IsInitializer = false);
+ bool IsLocationVolatile,
+ bool IsInitializer);
/// EmitAggregateCopy - Emit an aggregate copy.
///
@@ -1049,11 +1382,33 @@ public:
return Res;
}
+ /// getOpaqueLValueMapping - Given an opaque value expression (which
+ /// must be mapped to an l-value), return its mapping.
+ const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+ assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getOpaqueRValueMapping - Given an opaque value expression (which
+ /// must be mapped to an r-value), return its mapping.
+ const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+ assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
- llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+ llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
llvm::BasicBlock *GetIndirectGotoBlock();
/// EmitNullInitialization - Generate code to set a value of the given type to
@@ -1102,7 +1457,7 @@ public:
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns the address of the base class.
- llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
@@ -1117,7 +1472,7 @@ public:
llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl);
-
+
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args);
@@ -1125,6 +1480,11 @@ public:
bool ForVirtualBase, llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
@@ -1132,7 +1492,7 @@ public:
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
bool ZeroInitialization = false);
-
+
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
@@ -1154,7 +1514,7 @@ public:
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
-
+
void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
llvm::Value *NumElements);
@@ -1184,25 +1544,37 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitDecl(const Decl &D);
- /// EmitBlockVarDecl - Emit a block variable declaration.
+ /// EmitVarDecl - Emit a local variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitBlockVarDecl(const VarDecl &D);
+ void EmitVarDecl(const VarDecl &D);
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
- /// EmitLocalBlockVarDecl - Emit a local block variable declaration.
+ /// EmitAutoVarDecl - Emit an auto variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
+ void EmitAutoVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
- void EmitStaticBlockVarDecl(const VarDecl &D,
- llvm::GlobalValue::LinkageTypes Linkage);
+ void EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+ /// protectFromPeepholes - Protect a value that we're intending to
+ /// store to the side, but which will probably be used later, from
+ /// aggressive peepholing optimizations that might delete it.
+ ///
+ /// Pass the result to unprotectFromPeepholes to declare that
+ /// protection is no longer required.
+ ///
+ /// There's no particular reason why this shouldn't apply to
+ /// l-values, it's just that no existing peepholes work on pointers.
+ PeepholeProtection protectFromPeepholes(RValue rvalue);
+ void unprotectFromPeepholes(PeepholeProtection protection);
+
//===--------------------------------------------------------------------===//
// Statement Emission
//===--------------------------------------------------------------------===//
@@ -1227,11 +1599,11 @@ public:
bool EmitSimpleStmt(const Stmt *S);
RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
- llvm::Value *AggLoc = 0, bool isAggVol = false);
+ AggValueSlot AVS = AggValueSlot::ignored());
/// EmitLabel - Emit the block for the given label. It is legal to call this
/// function even if there is no current insertion point.
- void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+ void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
void EmitLabelStmt(const LabelStmt &S);
void EmitGotoStmt(const GotoStmt &S);
@@ -1260,7 +1632,7 @@ public:
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
-
+
//===--------------------------------------------------------------------===//
// LValue Expression Emission
//===--------------------------------------------------------------------===//
@@ -1303,17 +1675,27 @@ public:
/// object.
LValue EmitCheckedLValue(const Expr *E);
+ /// EmitToMemory - Change a scalar value from its value
+ /// representation to its in-memory representation.
+ llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+
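A sketch of why the pair exists: C++ 'bool' is i8 in memory but i1 as an LLVM value, so scalar loads and stores bracket with these conversions.

    llvm::Value *raw = Builder.CreateLoad(Addr);                 // i8
    llvm::Value *val = EmitFromMemory(raw, getContext().BoolTy); // trunc to i1
    llvm::Value *mem = EmitToMemory(val, getContext().BoolTy);   // zext to i8
    Builder.CreateStore(mem, OtherAddr);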
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty);
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment, QualType Ty);
+ bool Volatile, unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
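A sketch of threading the new TBAA parameter through a scalar copy, with the tag looked up via CGM.getTBAAInfo as in MakeAddrLValue above:

    llvm::MDNode *tbaa = CGM.getTBAAInfo(Ty);
    llvm::Value *v = EmitLoadOfScalar(SrcAddr, /*Volatile=*/false,
                                      Align, Ty, tbaa);
    EmitStoreOfScalar(v, DstAddr, /*Volatile=*/false, Align, Ty, tbaa);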
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
@@ -1321,9 +1703,8 @@ public:
RValue EmitLoadOfLValue(LValue V, QualType LVType);
RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
- RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
- RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
-
+ RValue EmitLoadOfPropertyRefLValue(LValue LV,
+ ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
@@ -1331,8 +1712,7 @@ public:
void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
QualType Ty);
- void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
- void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
/// EmitStoreThroughLValue.
@@ -1343,9 +1723,13 @@ public:
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
llvm::Value **Result=0);
+ /// Emit an l-value for an assignment (simple or compound) of complex type.
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
+
// Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
- LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitCallExprLValue(const CallExpr *E);
// Note: only available for agg return types
@@ -1360,25 +1744,26 @@ public:
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
- LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
+ LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
-
+ LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForAnonRecordField(llvm::Value* Base,
- const FieldDecl* Field,
+ const IndirectFieldDecl* Field,
unsigned CVRQualifiers);
LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
unsigned CVRQualifiers);
-
+
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
/// and not the address of the value stored in the reference.
- LValue EmitLValueForFieldInitialization(llvm::Value* Base,
+ LValue EmitLValueForFieldInitialization(llvm::Value* Base,
const FieldDecl* Field,
unsigned CVRQualifiers);
-
+
LValue EmitLValueForIvar(QualType ObjectTy,
llvm::Value* Base, const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
@@ -1390,18 +1775,17 @@ public:
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
- LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
+ LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
-
+
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
- LValue EmitObjCKVCRefLValue(const ObjCImplicitSetterGetterRefExpr *E);
- LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
- void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::ConstantInt *Init);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
//===--------------------------------------------------------------------===//
@@ -1424,7 +1808,7 @@ public:
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const Decl *TargetDecl = 0);
- RValue EmitCallExpr(const CallExpr *E,
+ RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
@@ -1434,8 +1818,15 @@ public:
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty);
- llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *&This, const llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ const llvm::Type *Ty);
+
+ llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
llvm::Value *Callee,
@@ -1453,7 +1844,7 @@ public:
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
-
+
RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E);
@@ -1464,15 +1855,15 @@ public:
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- llvm::Value *EmitNeonCall(llvm::Function *F,
+ llvm::Value *EmitNeonCall(llvm::Function *F,
llvm::SmallVectorImpl<llvm::Value*> &O,
- const char *name, bool splat = false,
+ const char *name,
unsigned shift = 0, bool rightshift = false);
- llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
- bool widen = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
bool negateForRightShift);
-
+
+ llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -1481,17 +1872,10 @@ public:
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
- RValue EmitObjCPropertyGet(const Expr *E,
- ReturnValueSlot Return = ReturnValueSlot());
- RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S,
- ReturnValueSlot Return = ReturnValueSlot());
- void EmitObjCPropertySet(const Expr *E, RValue Src);
- void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
-
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
- RValue EmitReferenceBindingToExpr(const Expr* E,
+ RValue EmitReferenceBindingToExpr(const Expr* E,
const NamedDecl *InitializedDecl);
//===--------------------------------------------------------------------===//
@@ -1516,12 +1900,10 @@ public:
QualType DstTy);
- /// EmitAggExpr - Emit the computation of the specified expression of
- /// aggregate type. The result is computed into DestPtr. Note that if
- /// DestPtr is null, the value of the aggregate expression is not needed.
- void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
- bool IgnoreResult = false, bool IsInitializer = false,
- bool RequiresGCollection = false);
+ /// EmitAggExpr - Emit the computation of the specified expression
+ /// of aggregate type. The result is computed into the given slot,
+ /// which may be null to indicate that the value is not needed.
+ void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a temporary LValue.
@@ -1534,10 +1916,9 @@ public:
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
- ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
- bool IgnoreImag = false,
- bool IgnoreRealAssign = false,
- bool IgnoreImagAssign = false);
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
/// of complex type, storing into the specified Value*.
@@ -1550,25 +1931,20 @@ public:
/// LoadComplexFromAddr - Load a complex number from the specified address.
ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
- /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a
- /// static block var decl.
- llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
- const char *Separator,
+ /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
+ /// a static local variable.
+ llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
llvm::GlobalValue::LinkageTypes Linkage);
-
- /// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+
+ /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
- AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
- llvm::GlobalVariable *GV);
-
+ AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
- /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime
- /// initialized static block var decl.
- void EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
- llvm::GlobalVariable *GV);
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
@@ -1579,6 +1955,13 @@ public:
void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
llvm::Constant *DeclPtr);
+ /// Emit code in this function to perform a guarded variable
+ /// initialization. Guarded initializations are used when it's not
+ /// possible to prove that an initialization will be done exactly
+ /// once, e.g. with a static local variable or a static data member
+ /// of a class template.
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
+
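On Itanium-ABI targets the emitted guard logic corresponds roughly to this source-level pattern (a sketch; `guard`, `w`, and `makeWidget` are illustrative names, though __cxa_guard_acquire/release are the real ABI entry points):

    // static Widget w = makeWidget();
    if (__cxa_guard_acquire(&guard)) { // nonzero => initialization needed
      w = makeWidget();                // run the initializer exactly once
      __cxa_guard_release(&guard);     // mark done, wake any waiting threads
    }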
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
@@ -1591,14 +1974,16 @@ public:
const std::vector<std::pair<llvm::WeakVH,
llvm::Constant*> > &DtorsAndObjects);
- void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
+ llvm::GlobalVariable *Addr);
- void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+ void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
+
+ void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
+ const Expr *Exp);
- RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
- llvm::Value *AggLoc = 0,
- bool IsAggLocVolatile = false,
- bool IsInitializer = false);
+ RValue EmitExprWithCleanups(const ExprWithCleanups *E,
+ AggValueSlot Slot = AggValueSlot::ignored());
void EmitCXXThrowExpr(const CXXThrowExpr *E);
@@ -1626,7 +2011,7 @@ public:
/// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
/// generate a branch around the created basic block as necessary.
llvm::BasicBlock *getTrapBB();
-
+
/// EmitCallArg - Emit a single call argument.
RValue EmitCallArg(const Expr *E, QualType ArgType);
@@ -1678,12 +2063,26 @@ private:
E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
assert(Arg != ArgEnd && "Running over edge of argument list!");
QualType ArgType = *I;
-
+#ifndef NDEBUG
+ QualType ActualArgType = Arg->getType();
+ if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
+ QualType ActualBaseType =
+ ActualArgType->getAs<PointerType>()->getPointeeType();
+ QualType ArgBaseType =
+ ArgType->getAs<PointerType>()->getPointeeType();
+ if (ArgBaseType->isVariableArrayType()) {
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(ActualBaseType)) {
+ if (!VAT->getSizeExpr())
+ ActualArgType = ArgType;
+ }
+ }
+ }
assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
getTypePtr() ==
- getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ getContext().getCanonicalType(ActualArgType).getTypePtr() &&
"type mismatch in call argument!");
-
+#endif
Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
ArgType));
}
@@ -1710,36 +2109,78 @@ private:
void EmitDeclMetadata();
};
-/// CGBlockInfo - Information to generate a block literal.
-class CGBlockInfo {
-public:
- /// Name - The name of the block, kindof.
- const char *Name;
-
- /// DeclRefs - Variables from parent scopes that have been
- /// imported into this block.
- llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
-
- /// InnerBlocks - This block and the blocks it encloses.
- llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
-
- /// CXXThisRef - Non-null if 'this' was required somewhere, in
- /// which case this is that expression.
- const CXXThisExpr *CXXThisRef;
-
- /// NeedsObjCSelf - True if something in this block has an implicit
- /// reference to 'self'.
- bool NeedsObjCSelf;
-
- /// These are initialized by GenerateBlockFunction.
- bool BlockHasCopyDispose;
- CharUnits BlockSize;
- CharUnits BlockAlign;
- llvm::SmallVector<const Expr*, 8> BlockLayout;
-
- CGBlockInfo(const char *Name);
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ /// Try to save the given value.
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
+
+ // Otherwise we need an alloca.
+ llvm::Value *alloca =
+ CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
+
+ return saved_type(alloca, true);
+ }
+
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ if (!value.getInt()) return value.getPointer();
+ return CGF.Builder.CreateLoad(value.getPointer());
+ }
};
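The trick is dominance: allocas and anything else in the entry block already dominate every later use, so only non-entry instructions need to be spilled. A usage sketch, with `CGF` and an llvm::Value* `v` assumed:

    DominatingLLVMValue::saved_type saved = DominatingLLVMValue::save(CGF, v);
    // ... conditionally emitted code; a cleanup may fire on a path that
    // does not dominate the original computation ...
    llvm::Value *restored = DominatingLLVMValue::restore(CGF, saved);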
-
+
+/// A partial specialization of DominatingValue for llvm::Values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ Kind K;
+ saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGExprCXX.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index d125b370a07a..9e5d7cf11276 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -14,10 +14,10 @@
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
+#include "CodeGenTBAA.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
-#include "Mangle.h"
#include "TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
@@ -25,6 +25,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
@@ -36,6 +37,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
@@ -57,19 +59,19 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
Diagnostic &diags)
- : BlockModule(C, M, TD, Types, *this), Context(C),
- Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ : Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ TBAA(0),
VTables(*this), Runtime(0),
- CFConstantStringClassRef(0), NSConstantStringClassRef(0),
+ CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
- BlockObjectAssign(0), BlockObjectDispose(0){
-
+ BlockObjectAssign(0), BlockObjectDispose(0),
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
if (!Features.ObjC1)
Runtime = 0;
else if (!Features.NeXTRuntime)
@@ -79,13 +81,32 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
else
Runtime = CreateMacObjCRuntime(*this);
+ // Enable TBAA unless it's suppressed or we aren't optimizing.
+ if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOptions(),
+ ABI.getMangleContext());
+
// If debug info generation is enabled, create the CGDebugInfo object.
DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+
+ Block.GlobalUniqueCount = 0;
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ PointerWidthInBits = C.Target.getPointerWidth(0);
+ IntTy = llvm::IntegerType::get(LLVMContext, C.Target.getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
}
CodeGenModule::~CodeGenModule() {
delete Runtime;
delete &ABI;
+ delete TBAA;
delete DebugInfo;
}
@@ -110,10 +131,23 @@ void CodeGenModule::Release() {
EmitAnnotations();
EmitLLVMUsed();
+ SimplifyPersonality();
+
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
}
+llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfo(QTy);
+}
+
+void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo) {
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+}
+
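Callers are expected to tolerate the null result when TBAA is disabled; a minimal sketch, assuming a `CGM`, a QualType `exprType`, and a freshly built `load` instruction:

    if (llvm::MDNode *tbaa = CGM.getTBAAInfo(exprType))
      CodeGenModule::DecorateInstruction(load, tbaa);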
bool CodeGenModule::isTargetDarwin() const {
return getContext().Target.getTriple().getOS() == llvm::Triple::Darwin;
}
@@ -143,93 +177,34 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
-LangOptions::VisibilityMode
-CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
- if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- if (VD->getStorageClass() == SC_PrivateExtern)
- return LangOptions::Hidden;
-
- if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
- switch (attr->getVisibility()) {
- default: assert(0 && "Unknown visibility!");
- case VisibilityAttr::Default:
- return LangOptions::Default;
- case VisibilityAttr::Hidden:
- return LangOptions::Hidden;
- case VisibilityAttr::Protected:
- return LangOptions::Protected;
- }
- }
-
- if (getLangOptions().CPlusPlus) {
- // Entities subject to an explicit instantiation declaration get default
- // visibility.
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
- if (Function->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const ClassTemplateSpecializationDecl *ClassSpec
- = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- if (ClassSpec->getSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
- if (Record->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if (Var->isStaticDataMember() &&
- (Var->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration))
- return LangOptions::Default;
- }
-
- // If -fvisibility-inlines-hidden was provided, then inline C++ member
- // functions get "hidden" visibility by default.
- if (getLangOptions().InlineVisibilityHidden)
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isInlined())
- return LangOptions::Hidden;
- }
-
- // If this decl is contained in a class, it should have the same visibility
- // as the parent class.
- if (const DeclContext *DC = D->getDeclContext())
- if (DC->isRecord())
- return getDeclVisibilityMode(cast<Decl>(DC));
-
- return getLangOptions().getVisibilityMode();
-}
-
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
- const Decl *D) const {
+ const NamedDecl *D) const {
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
return;
}
- switch (getDeclVisibilityMode(D)) {
- default: assert(0 && "Unknown visibility!");
- case LangOptions::Default:
- return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- case LangOptions::Hidden:
- return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- case LangOptions::Protected:
- return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- }
+ // Set visibility for definitions.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.visibilityExplicit() || !GV->hasAvailableExternallyLinkage())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
/// Set the symbol visibility of type information (vtable and RTTI)
/// associated with the given type.
void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
const CXXRecordDecl *RD,
- bool IsForRTTI) const {
+ TypeVisibilityKind TVK) const {
setGlobalVisibility(GV, RD);
if (!CodeGenOpts.HiddenWeakVTables)
return;
+ // We never want to drop the visibility for RTTI names.
+ if (TVK == TVK_ForRTTIName)
+ return;
+
// We want to drop the visibility to hidden for weak type symbols.
// This isn't possible if there might be unresolved references
// elsewhere that rely on this symbol being visible.
@@ -238,7 +213,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// in CGVTables.cpp.
// Preconditions.
- if (GV->getLinkage() != llvm::GlobalVariable::WeakODRLinkage ||
+ if (GV->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage ||
GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
return;
@@ -273,12 +248,14 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// If there's a key function, there may be translation units
// that don't have the key function's definition. But ignore
// this if we're emitting RTTI under -fno-rtti.
- if (!IsForRTTI || Features.RTTI)
+ if (TVK != TVK_ForRTTI || Features.RTTI) {
if (Context.getKeyFunction(RD))
return;
+ }
// Otherwise, drop the visibility to hidden.
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setUnnamedAddr(true);
}
llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
@@ -297,16 +274,18 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
}
llvm::SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
- getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer);
+ getCXXABI().getMangleContext().mangleBlock(BD, Out);
else
- getCXXABI().getMangleContext().mangleName(ND, Buffer);
+ getCXXABI().getMangleContext().mangleName(ND, Out);
// Allocate space for the mangled name.
+ Out.flush();
size_t Length = Buffer.size();
char *Name = MangledNamesAllocator.Allocate<char>(Length);
std::copy(Buffer.begin(), Buffer.end(), Name);
@@ -316,9 +295,19 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
return Str;
}
-void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
- const BlockDecl *BD) {
- getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
+void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ MangleContext &MangleCtx = getCXXABI().getMangleContext();
+ const Decl *D = GD.getDecl();
+ llvm::raw_svector_ostream Out(Buffer.getBuffer());
+ if (D == 0)
+ MangleCtx.mangleGlobalBlock(BD, Out);
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
+ else
+ MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
@@ -342,9 +331,7 @@ void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- std::vector<const llvm::Type*>(),
- false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
@@ -412,14 +399,18 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
- return llvm::Function::LinkOnceODRLinkage;
+ return !Context.getLangOptions().AppleKext
+ ? llvm::Function::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
// An explicit instantiation of a template has weak linkage, since
// explicit instantiations can occur in multiple translation units
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
if (Linkage == GVA_ExplicitTemplateInstantiation)
- return llvm::Function::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext
+ ? llvm::Function::WeakODRLinkage
+ : llvm::Function::InternalLinkage;
// Otherwise, we have strong external linkage.
assert(Linkage == GVA_StrongExternal);
@@ -455,9 +446,15 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (D->hasAttr<AlwaysInlineAttr>())
F->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (D->hasAttr<NakedAttr>())
+ F->addFnAttr(llvm::Attribute::Naked);
+
if (D->hasAttr<NoInlineAttr>())
F->addFnAttr(llvm::Attribute::NoInline);
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ F->setUnnamedAddr(true);
+
if (Features.getStackProtectorMode() == LangOptions::SSPOn)
F->addFnAttr(llvm::Attribute::StackProtect);
else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
@@ -474,7 +471,10 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
void CodeGenModule::SetCommonAttributes(const Decl *D,
llvm::GlobalValue *GV) {
- setGlobalVisibility(GV, D);
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ setGlobalVisibility(GV, ND);
+ else
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
if (D->hasAttr<UsedAttr>())
AddUsedGlobal(GV);
@@ -516,6 +516,11 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
F->setLinkage(llvm::Function::ExternalWeakLinkage);
} else {
F->setLinkage(llvm::Function::ExternalLinkage);
+
+ NamedDecl::LinkageInfo LV = FD->getLinkageAndVisibility();
+ if (LV.linkage() == ExternalLinkage && LV.visibilityExplicit()) {
+ F->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
}
if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
@@ -634,6 +639,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
new llvm::GlobalVariable(*M, unit->getType(), false,
llvm::GlobalValue::PrivateLinkage, unit,
".str");
+ unitGV->setUnnamedAddr(true);
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[4] = {
@@ -664,7 +670,8 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
@@ -796,7 +803,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D) {
+ GlobalDecl D, bool ForVTable) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -825,8 +832,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
- FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- std::vector<const llvm::Type*>(), false);
+ FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
IsIncompleteFunction = true;
}
@@ -846,30 +852,34 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
// list, and remove it from DeferredDecls (since we don't need it anymore).
DeferredDeclsToEmit.push_back(DDI->second);
DeferredDecls.erase(DDI);
- } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
- // If this the first reference to a C++ inline function in a class, queue up
- // the deferred function body for emission. These are not seen as
- // top-level declarations.
- if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
- DeferredDeclsToEmit.push_back(D);
- // A called constructor which has no definition or declaration need be
- // synthesized.
- else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- if (CD->isImplicit()) {
- assert(CD->isUsed() && "Sema doesn't consider constructor as used.");
- DeferredDeclsToEmit.push_back(D);
- }
- } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
- if (DD->isImplicit()) {
- assert(DD->isUsed() && "Sema doesn't consider destructor as used.");
- DeferredDeclsToEmit.push_back(D);
- }
- } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isCopyAssignment() && MD->isImplicit()) {
- assert(MD->isUsed() && "Sema doesn't consider CopyAssignment as used.");
- DeferredDeclsToEmit.push_back(D);
+
+ // Otherwise, there are cases we have to worry about where we're
+ // using a declaration for which we must emit a definition but where
+ // we might not find a top-level definition:
+ // - member functions defined inline in their classes
+ // - friend functions defined inline in some class
+ // - special member functions with implicit definitions
+ // If we ever change our AST traversal to walk into class methods,
+ // this will be unnecessary.
+ //
+ // We also don't emit a definition for a function if it's going to be an entry
+ // in a vtable, unless it's already marked as used.
+ } else if (getLangOptions().CPlusPlus && D.getDecl()) {
+ // Look for a declaration that's lexically in a record.
+ const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ do {
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ if (FD->isImplicit() && !ForVTable) {
+ assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ } else if (FD->isThisDeclarationADefinition()) {
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ }
}
- }
+ FD = FD->getPreviousDeclaration();
+ } while (FD);
}
// Make sure the result is of the requested type.
@@ -886,13 +896,14 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty) {
+ const llvm::Type *Ty,
+ bool ForVTable) {
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
llvm::StringRef MangledName = getMangledName(GD);
- return GetOrCreateLLVMFunction(MangledName, Ty, GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
}
/// CreateRuntimeFunction - Create a new runtime function with the specified
@@ -900,7 +911,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
llvm::StringRef Name) {
- return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
}
static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
@@ -924,7 +935,8 @@ static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *Ty,
- const VarDecl *D) {
+ const VarDecl *D,
+ bool UnnamedAddr) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -935,6 +947,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
WeakRefReferences.erase(Entry);
}
+ if (UnnamedAddr)
+ Entry->setUnnamedAddr(true);
+
if (Entry->getType() == Ty)
return Entry;
@@ -965,13 +980,20 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
// handling.
GV->setConstant(DeclIsConstantGlobal(Context, D));
- // FIXME: Merge with other attribute handling code.
- if (D->getStorageClass() == SC_PrivateExtern)
- GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-
- if (D->hasAttr<WeakAttr>() ||
- D->hasAttr<WeakImportAttr>())
- GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // Set linkage and visibility in case we never see a definition.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.linkage() != ExternalLinkage) {
+ // Don't set internal linkage on declarations.
+ } else {
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ // Set visibility on a declaration only if it's explicit.
+ if (LV.visibilityExplicit())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
GV->setThreadLocal(D->isThreadSpecified());
}
@@ -980,6 +1002,45 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
}
+llvm::GlobalVariable *
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
+ const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ // Because of C++ name mangling, the only way we can end up with an
+ // already existing global with the same name is if it has been declared
+ // extern "C".
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
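A sketch of the intended use, e.g. fetching a C++ runtime global that may first have been declared with a different type; the mangled name and `VTableTy` below are illustrative only:

    llvm::GlobalVariable *GV =
      CGM.CreateOrReplaceCXXRuntimeVariable("_ZTVN3foo3BarE", VTableTy,
                                            llvm::GlobalValue::ExternalLinkage);

The bitcast-and-RAUW step is what lets an extern "C" forward declaration of the same symbol coexist with the properly typed definition.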
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
@@ -1003,7 +1064,8 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
llvm::StringRef Name) {
- return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
+ true);
}
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
@@ -1045,44 +1107,63 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (KeyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
+ // When compiling with optimizations turned on, we emit all vtables,
+ // even if the key function is not defined in the current translation
+ // unit. If this is the case, use available_externally linkage.
+ if (!Def && CodeGenOpts.OptimizationLevel)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
if (KeyFunction->isInlined())
- return llvm::GlobalVariable::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
case TSK_ExplicitInstantiationDefinition:
- return llvm::GlobalVariable::WeakODRLinkage;
-
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::WeakODRLinkage :
+ llvm::Function::InternalLinkage;
+
case TSK_ExplicitInstantiationDeclaration:
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return llvm::GlobalVariable::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
}
}
+ if (Context.getLangOptions().AppleKext)
+ return llvm::Function::InternalLinkage;
+
switch (RD->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDefinition:
- return llvm::GlobalVariable::WeakODRLinkage;
-
- case TSK_ExplicitInstantiationDeclaration:
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return llvm::GlobalVariable::WeakODRLinkage;
+ case TSK_ExplicitInstantiationDeclaration:
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
}
// Silence GCC warning.
- return llvm::GlobalVariable::WeakODRLinkage;
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
- return CharUnits::fromQuantity(
- TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
+ return Context.toCharUnitsFromBits(
+ TheTargetData.getTypeStoreSizeInBits(Ty));
}
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
@@ -1112,7 +1193,6 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
T = D->getType();
if (getLangOptions().CPlusPlus) {
- EmitCXXGlobalVarDeclInitFunc(D);
Init = EmitNullConstant(T);
NonConstInit = true;
} else {
@@ -1183,42 +1263,57 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setConstant(true);
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
-
+
// Set the llvm linkage type as appropriate.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ GetLLVMLinkageVarDefinition(D, GV);
+ GV->setLinkage(Linkage);
+ if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit the initializer function if necessary.
+ if (NonConstInit)
+ EmitCXXGlobalVarDeclInitFunc(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
if (Linkage == GVA_Internal)
- GV->setLinkage(llvm::Function::InternalLinkage);
+ return llvm::Function::InternalLinkage;
else if (D->hasAttr<DLLImportAttr>())
- GV->setLinkage(llvm::Function::DLLImportLinkage);
+ return llvm::Function::DLLImportLinkage;
else if (D->hasAttr<DLLExportAttr>())
- GV->setLinkage(llvm::Function::DLLExportLinkage);
+ return llvm::Function::DLLExportLinkage;
else if (D->hasAttr<WeakAttr>()) {
if (GV->isConstant())
- GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
+ return llvm::GlobalVariable::WeakODRLinkage;
else
- GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ return llvm::GlobalVariable::WeakAnyLinkage;
} else if (Linkage == GVA_TemplateInstantiation ||
Linkage == GVA_ExplicitTemplateInstantiation)
// FIXME: It seems like we can provide more specific linkage here
// (LinkOnceODR, WeakODR).
- GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
- else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
+ return llvm::GlobalVariable::WeakAnyLinkage;
+ else if (!getLangOptions().CPlusPlus &&
+ ((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
+ D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
!D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
// Thread local vars aren't considered common linkage.
- GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
- // common vars aren't constant even if declared const.
- GV->setConstant(false);
- } else
- GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
-
- SetCommonAttributes(D, GV);
-
- // Emit global variable debug information.
- if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D->getLocation());
- DI->EmitGlobalVariable(GV, D);
+ return llvm::GlobalVariable::CommonLinkage;
}
+ return llvm::GlobalVariable::ExternalLinkage;
}
/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
@@ -1295,7 +1390,6 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
- getCXXABI().getMangleContext().mangleInitDiscriminator();
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1345,9 +1439,16 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
Entry = NewFn;
}
+ // We need to set linkage and visibility on the function before
+ // generating code for it because various parts of IR generation
+ // want to propagate this information down (e.g. to local static
+ // declarations).
llvm::Function *Fn = cast<llvm::Function>(Entry);
setFunctionLinkage(D, Fn);
+ // FIXME: this is redundant with part of SetFunctionDefinitionAttributes
+ setGlobalVisibility(Fn, D);
+
CodeGenFunction(*this).GenerateCode(D, Fn);
SetFunctionDefinitionAttributes(D, Fn);
@@ -1378,7 +1479,8 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// if a deferred decl.
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
@@ -1444,7 +1546,7 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
const llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
- return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD));
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD), /*ForVTable=*/false);
}
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
@@ -1453,27 +1555,6 @@ llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
(llvm::Intrinsic::ID)IID, Tys, NumTys);
}
-
-llvm::Function *CodeGenModule::getMemCpyFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memcpy, ArgTypes, 3);
-}
-
-llvm::Function *CodeGenModule::getMemMoveFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memmove, ArgTypes, 3);
-}
-
-llvm::Function *CodeGenModule::getMemSetFn(const llvm::Type *DestType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[2] = { DestType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memset, ArgTypes, 2);
-}
-
static llvm::StringMapEntry<llvm::Constant*> &
GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
const StringLiteral *Literal,
@@ -1494,18 +1575,9 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
- ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
- &ToPtr, ToPtr + NumBytes,
- strictConversion);
-
- // Check for conversion failure.
- if (Result != conversionOK) {
- // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
- // this duplicate code.
- assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
- StringLength = NumBytes;
- return Map.GetOrCreateValue(String);
- }
+ (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
@@ -1595,6 +1667,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
+ GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
@@ -1618,7 +1691,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
llvm::Constant *
-CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
+CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
@@ -1634,16 +1707,27 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get _NSConstantStringClassReference.
- if (!NSConstantStringClassRef) {
+ if (!ConstantStringClassRef) {
+ std::string StringClass(getLangOptions().ObjCConstantStringClass);
const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::Constant *GV = CreateRuntimeVariable(Ty,
- Features.ObjCNonFragileABI ?
- "OBJC_CLASS_$_NSConstantString" :
- "_NSConstantStringClassReference");
+ llvm::Constant *GV;
+ if (StringClass.empty())
+ GV = CreateRuntimeVariable(Ty,
+ Features.ObjCNonFragileABI ?
+ "OBJC_CLASS_$_NSConstantString" :
+ "_NSConstantStringClassReference");
+ else {
+ std::string str;
+ if (Features.ObjCNonFragileABI)
+ str = "OBJC_CLASS_$_" + StringClass;
+ else
+ str = "_" + StringClass + "ClassReference";
+ GV = CreateRuntimeVariable(Ty, str);
+ }
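    // For example (hypothetical flag value): -fconstant-string-class=MyString
    // looks up "OBJC_CLASS_$_MyString" under the non-fragile ABI and
    // "_MyStringClassReference" under the fragile ABI.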
// Decay array -> ptr
- NSConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
}
QualType NSTy = getContext().getNSConstantStringType();
@@ -1654,7 +1738,7 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
std::vector<llvm::Constant*> Fields(3);
// Class pointer.
- Fields[0] = NSConstantStringClassRef;
+ Fields[0] = ConstantStringClassRef;
// String pointer.
llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
@@ -1675,6 +1759,7 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
+ GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
@@ -1704,15 +1789,16 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
/// GetStringForStringLiteral - Return the appropriate bytes for a
/// string literal, properly padded to match the literal type.
std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const ASTContext &Context = getContext();
const ConstantArrayType *CAT =
- getContext().getAsConstantArrayType(E->getType());
+ Context.getAsConstantArrayType(E->getType());
assert(CAT && "String isn't pointer or array!");
// Resize the string to the right size.
uint64_t RealLen = CAT->getSize().getZExtValue();
if (E->isWide())
- RealLen *= getContext().Target.getWCharWidth()/8;
+ RealLen *= Context.Target.getWCharWidth() / Context.getCharWidth();
std::string Str = E->getString().str();
Str.resize(RealLen, '\0');
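A worked example of the padding, assuming a target with a 4-byte (32-bit) wchar_t:

    // char s[10] = "hi";    -> RealLen = 10: "hi" plus eight '\0' bytes
    // wchar_t w[3] = L"hi"; -> RealLen = 3 * (32 / 8) = 12 bytes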
@@ -1756,9 +1842,12 @@ static llvm::Constant *GenerateStringLiteral(const std::string &str,
llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
// Create a global variable for this string
- return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
- llvm::GlobalValue::PrivateLinkage,
- C, GlobalName);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ GV->setUnnamedAddr(true);
+ return GV;
}
/// GetAddrOfConstantString - Returns a pointer to a character array
@@ -2097,7 +2186,7 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
return BlockObjectDispose =
@@ -2119,8 +2208,8 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
return BlockObjectAssign =
@@ -2139,8 +2228,8 @@ llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
}
// Otherwise construct the variable by hand.
- return NSConcreteGlobalBlock = CreateRuntimeVariable(
- PtrToInt8Ty, "_NSConcreteGlobalBlock");
+ return NSConcreteGlobalBlock =
+ CreateRuntimeVariable(Int8PtrTy, "_NSConcreteGlobalBlock");
}
llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
@@ -2155,8 +2244,8 @@ llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
}
// Otherwise construct the variable by hand.
- return NSConcreteStackBlock = CreateRuntimeVariable(
- PtrToInt8Ty, "_NSConcreteStackBlock");
+ return NSConcreteStackBlock =
+ CreateRuntimeVariable(Int8PtrTy, "_NSConcreteStackBlock");
}
///@}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index cabff9e1cad5..b6bd37c1c0d3 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -14,17 +14,16 @@
#ifndef CLANG_CODEGEN_CODEGENMODULE_H
#define CLANG_CODEGEN_CODEGENMODULE_H
+#include "clang/Basic/ABI.h"
#include "clang/Basic/LangOptions.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "CGBlocks.h"
+#include "clang/AST/Mangle.h"
#include "CGCall.h"
-#include "CGCXX.h"
#include "CGVTables.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
-#include "Mangle.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
@@ -66,14 +65,16 @@ namespace clang {
class Diagnostic;
class AnnotateAttr;
class CXXDestructorDecl;
+ class MangleBuffer;
namespace CodeGen {
class CodeGenFunction;
+ class CodeGenTBAA;
class CGCXXABI;
class CGDebugInfo;
class CGObjCRuntime;
- class MangleBuffer;
+ class BlockFieldFlags;
struct OrderGlobalInits {
unsigned int priority;
@@ -93,10 +94,39 @@ namespace CodeGen {
return priority == RHS.priority && lex_order < RHS.lex_order;
}
};
+
+ struct CodeGenTypeCache {
+ /// i8, i32, and i64
+ const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+
+ /// int
+ const llvm::IntegerType *IntTy;
+
+ /// intptr_t and size_t, which we assume are the same
+ union {
+ const llvm::IntegerType *IntPtrTy;
+ const llvm::IntegerType *SizeTy;
+ };
+
+ /// void* in address space 0
+ union {
+ const llvm::PointerType *VoidPtrTy;
+ const llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ const llvm::PointerType *VoidPtrPtrTy;
+ const llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The width in bits of a pointer in address space 0.
+ unsigned char PointerWidthInBits;
+ };
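Since CodeGenModule derives publicly from this cache and the aliased members share unions, callers can use whichever name reads best; a sketch assuming a `CGM`:

    // Int8PtrTy and VoidPtrTy name the same cached llvm::PointerType.
    llvm::Constant *null = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    llvm::Constant *size = llvm::ConstantInt::get(CGM.SizeTy, 42);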
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
-class CodeGenModule : public BlockModule {
+class CodeGenModule : public CodeGenTypeCache {
CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
@@ -111,6 +141,7 @@ class CodeGenModule : public BlockModule {
Diagnostic &Diags;
CGCXXABI &ABI;
CodeGenTypes Types;
+ CodeGenTBAA *TBAA;
/// VTables - Holds information about C++ vtables.
CodeGenVTables VTables;
@@ -183,9 +214,9 @@ class CodeGenModule : public BlockModule {
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
- /// NSConstantStringClassRef - Cached reference to the class for constant
+ /// ConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
- llvm::Constant *NSConstantStringClassRef;
+ llvm::Constant *ConstantStringClassRef;
/// Lazily create the Objective-C runtime
void createObjCRuntime();
@@ -205,6 +236,16 @@ class CodeGenModule : public BlockModule {
llvm::Constant *BlockObjectAssign;
llvm::Constant *BlockObjectDispose;
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ llvm::DenseMap<uint64_t, llvm::Constant *> AssignCache;
+ llvm::DenseMap<uint64_t, llvm::Constant *> DestroyCache;
+
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
@@ -246,21 +287,43 @@ public:
CodeGenVTables &getVTables() { return VTables; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Context.Target; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
const TargetCodeGenInfo &getTargetCodeGenInfo();
bool isTargetDarwin() const;
- /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
- LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ static void DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo);
/// setGlobalVisibility - Set the visibility for the given LLVM
/// GlobalValue.
- void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ /// TypeVisibilityKind - The kind of global variable that is passed to
+ /// setTypeVisibility
+ enum TypeVisibilityKind {
+ TVK_ForVTT,
+ TVK_ForVTable,
+ TVK_ForRTTI,
+ TVK_ForRTTIName
+ };
/// setTypeVisibility - Set the visibility for the given global
/// value which holds information about a type.
void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
- bool IsForRTTI) const;
+ TypeVisibilityKind TVK) const;
+
+ static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
+ switch (V) {
+ case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
+ case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility;
+ case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown visibility!");
+ return llvm::GlobalValue::DefaultVisibility;
+ }
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
if (isa<CXXConstructorDecl>(GD.getDecl()))
@@ -275,6 +338,14 @@ public:
return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
}
+ /// CreateOrReplaceCXXRuntimeVariable - Return a global variable of the
+ /// given type. If a variable with a different type already exists, then
+ /// a new variable with the right type will be created and all uses of
+ /// the old variable will be replaced with a bitcast to the new variable.
+ llvm::GlobalVariable *
+ CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name, const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
 /// then it will be created with the specified type instead of whatever the
@@ -286,7 +357,8 @@ public:
/// non-null, then this function will use the specified type if it has to
/// create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty = 0);
+ const llvm::Type *Ty = 0,
+ bool ForVTable = false);
/// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
/// for the given type.
@@ -304,6 +376,29 @@ public:
GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned Align,
+ const VarDecl *variable);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned Align,
+ const VarDecl *variable);
+
+ /// getGlobalUniqueCount - Fetches the global unique block count.
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+
+ /// getBlockDescriptorType - Fetches the type of a generic block
+ /// descriptor.
+ const llvm::Type *getBlockDescriptorType();
+
+ /// getGenericBlockLiteralType - The type of a generic block literal.
+ const llvm::Type *getGenericBlockLiteralType();
+
+ /// GetAddrOfGlobalBlock - Gets the address of a block which
+ /// requires no captures.
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
/// GetStringForStringLiteral - Return the appropriate bytes for a string
/// literal, properly padded to match the literal type. If only the address of
@@ -314,9 +409,10 @@ public:
/// for the given string.
llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
- /// GetAddrOfConstantNSString - Return a pointer to a constant NSString object
- /// for the given string.
- llvm::Constant *GetAddrOfConstantNSString(const StringLiteral *Literal);
+ /// GetAddrOfConstantString - Return a pointer to a constant NSString
+ /// object for the given string, or to a user-defined string object as
+ /// specified via the -fconstant-string-class=class_name option.
+ llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
/// for the given string literal.
@@ -363,17 +459,6 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getMemCpyFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType);
-
- llvm::Function *getMemMoveFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType);
-
- llvm::Function *getMemSetFn(const llvm::Type *DestType,
- const llvm::Type *SizeType);
-
llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
unsigned NumTys = 0);
@@ -417,6 +502,8 @@ public:
Types.UpdateCompletedType(TD);
}
+ llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+
/// EmitConstantExpr - Try to emit the given expression as a
/// constant; returns 0 if the expression cannot be emitted as a
/// constant.
@@ -485,7 +572,8 @@ public:
unsigned &CallingConv);
llvm::StringRef getMangledName(GlobalDecl GD);
- void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD);
+ void getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD);
void EmitTentativeDefinition(const VarDecl *D);
@@ -500,13 +588,18 @@ public:
/// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
/// and type information of the given class.
- static llvm::GlobalVariable::LinkageTypes
- getVTableLinkage(const CXXRecordDecl *RD);
+ llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
/// GetTargetTypeStoreSize - Return the store size, in character units, of
/// the given LLVM type.
CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
-
+
+ /// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
+ /// variable.
+ llvm::GlobalValue::LinkageTypes
+ GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV);
+
std::vector<const CXXRecordDecl*> DeferredVTables;
private:
@@ -514,10 +607,12 @@ private:
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D);
+ GlobalDecl D,
+ bool ForVTable);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *PTy,
- const VarDecl *D);
+ const VarDecl *D,
+ bool UnnamedAddr = false);
/// SetCommonAttributes - Set attributes which are common to any
/// form of a global definition (alias, Objective-C method,
@@ -547,7 +642,7 @@ private:
void EmitAliasDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
-
+
// C++ related functions.
bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
@@ -578,7 +673,8 @@ private:
/// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
void EmitCXXGlobalDtorFunc();
- void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr);
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
@@ -613,6 +709,10 @@ private:
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
bool MayDeferGeneration(const ValueDecl *D);
+
+ /// SimplifyPersonality - Check whether we can use a simpler, more
+ /// minimal exceptions personality function.
+ void SimplifyPersonality();
};
} // end namespace CodeGen
} // end namespace clang
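
A rough sketch of the replace-with-bitcast contract that
CreateOrReplaceCXXRuntimeVariable documents above; assumed shape only, the
real implementation lives in CodeGenModule.cpp:

    llvm::GlobalVariable *
    createOrReplace(llvm::Module &M, llvm::StringRef Name,
                    const llvm::Type *Ty,
                    llvm::GlobalValue::LinkageTypes Linkage) {
      llvm::GlobalVariable *Old = M.getNamedGlobal(Name);
      if (Old && Old->getType()->getElementType() == Ty)
        return Old;                     // already the right type; reuse it
      llvm::GlobalVariable *New =
          new llvm::GlobalVariable(M, Ty, /*isConstant=*/false, Linkage,
                                   /*Initializer=*/0);
      if (Old) {
        // Redirect existing uses through a bitcast to the old pointer type,
        // then drop the old variable so the name becomes free again.
        Old->replaceAllUsesWith(
            llvm::ConstantExpr::getBitCast(New, Old->getType()));
        Old->eraseFromParent();
      }
      New->setName(Name);
      return New;
    }
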
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
new file mode 100644
index 000000000000..3f2c6cabf2ae
--- /dev/null
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -0,0 +1,180 @@
+//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use. Relevant standards text includes:
+//
+// C99 6.5p7
+// C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTBAA.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Constants.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const LangOptions &Features, MangleContext &MContext)
+ : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ Root(0), Char(0) {
+}
+
+CodeGenTBAA::~CodeGenTBAA() {
+}
+
+llvm::MDNode *CodeGenTBAA::getRoot() {
+ // Define the root of the tree. This identifies the tree, so that
+ // if our LLVM IR is linked with LLVM IR from a different front-end
+ // (or a different version of this front-end), their TBAA trees will
+ // remain distinct, and the optimizer will treat them conservatively.
+ if (!Root)
+ Root = getTBAAInfoForNamedType("Simple C/C++ TBAA", 0);
+
+ return Root;
+}
+
+llvm::MDNode *CodeGenTBAA::getChar() {
+ // Define the root of the tree for user-accessible memory. C and C++
+ // give special powers to char and certain similar types. However,
+// these special powers only cover user-accessible memory, and don't
+// include things like vtables.
+ if (!Char)
+ Char = getTBAAInfoForNamedType("omnipotent char", getRoot());
+
+ return Char;
+}
+
+/// getTBAAInfoForNamedType - Create a TBAA tree node with the given string
+/// as its identifier, and the given Parent node as its tree parent.
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(llvm::StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly) {
+ // Currently there is only one flag defined - the readonly flag.
+ llvm::Value *Flags = 0;
+ if (Readonly)
+ Flags = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), true);
+
+ // Set up the mdnode operand list.
+ llvm::Value *Ops[] = {
+ llvm::MDString::get(VMContext, NameStr),
+ Parent,
+ Flags
+ };
+
+ // Create the mdnode.
+ return llvm::MDNode::get(VMContext, Ops, llvm::array_lengthof(Ops) - !Flags);
+}
+
+static bool TypeHasMayAlias(QualType QTy) {
+ // Tagged types have declarations, and therefore may have attributes.
+ if (const TagType *TTy = dyn_cast<TagType>(QTy))
+ return TTy->getDecl()->hasAttr<MayAliasAttr>();
+
+ // Typedef types have declarations, and therefore may have attributes.
+ if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) {
+ if (TTy->getDecl()->hasAttr<MayAliasAttr>())
+ return true;
+ // Also, their underlying types may have relevant attributes.
+ return TypeHasMayAlias(TTy->desugar());
+ }
+
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // If the type has the may_alias attribute (even on a typedef), it is
+ // effectively in the general char alias class.
+ if (TypeHasMayAlias(QTy))
+ return getChar();
+
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = MetadataCache[Ty])
+ return N;
+
+ // Handle builtin types.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
+ switch (BTy->getKind()) {
+ // Character types are special and can alias anything.
+ // In C++, this technically only includes "char" and "unsigned char",
+ // and not "signed char". In C, it includes all three. For now,
+ // the risk of exploiting this detail in C++ seems likely to outweigh
+ // the benefit.
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ return getChar();
+
+ // Unsigned types can alias their corresponding signed types.
+ case BuiltinType::UShort:
+ return getTBAAInfo(Context.ShortTy);
+ case BuiltinType::UInt:
+ return getTBAAInfo(Context.IntTy);
+ case BuiltinType::ULong:
+ return getTBAAInfo(Context.LongTy);
+ case BuiltinType::ULongLong:
+ return getTBAAInfo(Context.LongLongTy);
+ case BuiltinType::UInt128:
+ return getTBAAInfo(Context.Int128Ty);
+
+ // Treat all other builtin types as distinct types. This includes
+ // treating wchar_t, char16_t, and char32_t as distinct from their
+ // "underlying types".
+ default:
+ return MetadataCache[Ty] =
+ getTBAAInfoForNamedType(BTy->getName(Features), getChar());
+ }
+ }
+
+ // Handle pointers.
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct.
+ if (Ty->isPointerType())
+ return MetadataCache[Ty] = getTBAAInfoForNamedType("any pointer",
+ getChar());
+
+ // Enum types are distinct types. In C++ they have "underlying types",
+ // however they aren't related for TBAA.
+ if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ // In C mode, two anonymous enums are compatible iff their members
+ // are the same -- see C99 6.2.7p1. For now, be conservative. We could
+ // theoretically implement this by combining information about all the
+ // members into a single identifying MDNode.
+ if (!Features.CPlusPlus &&
+ ETy->getDecl()->getTypedefForAnonDecl())
+ return MetadataCache[Ty] = getChar();
+
+ // In C++ mode, types have linkage, so we can rely on the ODR and
+ // on their mangled names, if they're external.
+ // TODO: Is there a way to get a program-wide unique name for a
+ // decl with local linkage or no linkage?
+ if (Features.CPlusPlus &&
+ ETy->getDecl()->getLinkage() != ExternalLinkage)
+ return MetadataCache[Ty] = getChar();
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
+ Out.flush();
+ return MetadataCache[Ty] = getTBAAInfoForNamedType(OutName, getChar());
+ }
+
+ // For now, handle any other kind of type conservatively.
+ return MetadataCache[Ty] = getChar();
+}
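
A small illustration of what these tags buy; example code, not part of the
patch. "int" and "float" get distinct nodes under "omnipotent char", so the
optimizer may assume accesses through them do not alias, while any char
access (or any type carrying the may_alias attribute) conservatively aliases
everything user-accessible:

    // With TBAA, the reload of *x may be folded to 1.0f: the "int" store
    // is assumed not to touch "float" storage.
    float f(int *i, float *x) {
      *x = 1.0f;  // tag "float" (parent "omnipotent char")
      *i = 2;     // tag "int"   (parent "omnipotent char")
      return *x;
    }

    // No such folding here: char carries the "omnipotent char" tag and may
    // alias the int store, so *i must be reloaded.
    int g(int *i, char *c) {
      *i = 2;
      *c = 1;
      return *i;
    }
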
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
new file mode 100644
index 000000000000..c4583473a0e0
--- /dev/null
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -0,0 +1,76 @@
+//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTBAA_H
+#define CLANG_CODEGEN_CODEGENTBAA_H
+
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+ class LLVMContext;
+ class MDNode;
+}
+
+namespace clang {
+ class ASTContext;
+ class LangOptions;
+ class MangleContext;
+ class QualType;
+ class Type;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+/// CodeGenTBAA - This class organizes the cross-module state that is used
+/// while generating type-based alias analysis metadata for LLVM IR.
+class CodeGenTBAA {
+ ASTContext &Context;
+ llvm::LLVMContext& VMContext;
+ const LangOptions &Features;
+ MangleContext &MContext;
+
+ /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
+ llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+
+ llvm::MDNode *Root;
+ llvm::MDNode *Char;
+
+ /// getRoot - This is the mdnode for the root of the metadata type graph
+ /// for this translation unit.
+ llvm::MDNode *getRoot();
+
+ /// getChar - This is the mdnode for "char", which is special, and any types
+ /// considered to be equivalent to it.
+ llvm::MDNode *getChar();
+
+ llvm::MDNode *getTBAAInfoForNamedType(llvm::StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly = false);
+
+public:
+ CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const LangOptions &Features,
+ MangleContext &MContext);
+ ~CodeGenTBAA();
+
+ /// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
+ /// of the given type.
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 5ab65c5779b6..5254922f13a1 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -85,7 +85,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
T = Context.getCanonicalType(T);
// See if type is already cached.
- llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
I = TypeCache.find(T.getTypePtr());
 // If the type is found in the map and this is not a definition for an
 // opaque placeholder type, then use it. Otherwise, convert type T.
@@ -228,7 +228,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::ULong:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Char16:
case BuiltinType::Char32:
return llvm::IntegerType::get(getLLVMContext(),
@@ -252,7 +253,6 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::UndeducedAuto:
assert(0 && "Unexpected builtin type!");
break;
}
@@ -371,26 +371,30 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
const TagDecl *TD = cast<TagType>(Ty).getDecl();
const llvm::Type *Res = ConvertTagDeclType(TD);
- std::string TypeName(TD->getKindName());
- TypeName += '.';
+ llvm::SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << TD->getKindName() << '.';
// Name the codegen type after the typedef name
// if there is no tag type name available
- if (TD->getIdentifier())
+ if (TD->getIdentifier()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
- TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
- TD->getNameAsString();
- else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ if (TD->getDeclContext())
+ OS << TD->getQualifiedNameAsString();
+ else
+ TD->printName(OS);
+ } else if (const TypedefDecl *TDD = TD->getTypedefForAnonDecl()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
- TypeName += TdT->getDecl()->getDeclContext() ?
- TdT->getDecl()->getQualifiedNameAsString() :
- TdT->getDecl()->getNameAsString();
- else
- TypeName += "anon";
-
- TheModule.addTypeName(TypeName, Res);
+ if (TDD->getDeclContext())
+ OS << TDD->getQualifiedNameAsString();
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ TheModule.addTypeName(OS.str(), Res);
return Res;
}
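
The stream-based naming above registers module type names of the form
<tag kind> '.' <name>. A few expected outputs under the rules just added
(illustrative):

    namespace N { struct S {}; }  // registered as "struct.N::S"
    union U {};                   // registered as "union.U"
    typedef struct {} T;          // anonymous tag, named via its typedef:
                                  //   "struct.T"
    struct {} *p;                 // no tag name, no typedef: "struct.anon"
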
@@ -424,9 +428,13 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
if (TDTI != TagDeclTypes.end())
return TDTI->second;
+ const EnumDecl *ED = dyn_cast<EnumDecl>(TD);
+
// If this is still a forward declaration, just define an opaque
// type to use for this tagged decl.
- if (!TD->isDefinition()) {
+ // C++0x: If this is an enumeration type with a fixed underlying type,
+ // consider it complete.
+ if (!TD->isDefinition() && !(ED && ED->isFixed())) {
llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
TagDeclTypes.insert(std::make_pair(Key, ResultType));
return ResultType;
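
Concretely, the C++0x carve-out above means an opaque-declared enumeration
with a fixed underlying type already lowers to a real integer type
(illustrative):

    enum E : short;  // no definition, but ED->isFixed() is true
    E *p;            // E lowers to i16 here rather than to an opaque type
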
@@ -434,8 +442,8 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
// Okay, this is a definition of a type. Compile the implementation now.
- if (TD->isEnum()) // Don't bother storing enums in TagDeclTypes.
- return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+ if (ED) // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(ED->getIntegerType());
// This decl could well be recursive. In this case, insert an opaque
// definition of this type, which the recursive uses will get. We will then
@@ -474,11 +482,20 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
return ResultHolder.get();
}
-/// getCGRecordLayout - Return record layout info for the given llvm::Type.
+/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
-CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
- const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ if (!Layout) {
+ // Compute the type information.
+ ConvertTagDeclType(RD);
+
+ // Now try again.
+ Layout = CGRecordLayouts.lookup(Key);
+ }
+
assert(Layout && "Unable to find record layout information for type");
return *Layout;
}
@@ -506,11 +523,5 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
}
bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
-
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- ConvertTagDeclType(RD);
-
- const CGRecordLayout &Layout = getCGRecordLayout(RD);
- return Layout.isZeroInitializable();
+ return getCGRecordLayout(RD).isZeroInitializable();
}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 1fc2153fcadb..41513daf17ca 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -88,14 +88,14 @@ private:
/// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
/// used instead of llvm::Type because it allows us to bypass potential
 /// dangling type pointers due to type refinement on the LLVM side.
- llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+ llvm::DenseMap<const Type *, llvm::PATypeHolder> TypeCache;
/// ConvertNewType - Convert type T into a llvm::Type. Do not use this
/// method directly because it does not do any type caching. This method
 /// is available only for ConvertType(). ConvertType() is the preferred
 /// interface to convert type T into a llvm::Type.
const llvm::Type *ConvertNewType(QualType T);
-
+
/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
/// pointers that are referenced but have not been converted yet. This is
/// used to handle cyclic structures properly.
@@ -139,11 +139,11 @@ public:
static const TagType *VerifyFuncTypeComplete(const Type* T);
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
- /// given a CXXMethodDecl. If the method to has an incomplete return type,
+ /// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
-
- const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
@@ -151,7 +151,7 @@ public:
/// getFunctionInfo - Get the function info for the specified function decl.
const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
-
+
const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
@@ -176,7 +176,7 @@ public:
/// pointers.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
-
+
/// getFunctionInfo - Get the function info for a function described by a
/// return type and argument types. If the calling convention is not
/// specified, the "C" calling convention will be used.
@@ -188,7 +188,7 @@ public:
const FunctionType::ExtInfo &Info);
/// Retrieves the ABI information for the given function signature.
- ///
+ ///
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
@@ -208,11 +208,11 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
bool IsRecursive);
-
+
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(QualType T);
-
+
/// IsZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const CXXRecordDecl *RD);
diff --git a/lib/CodeGen/GlobalDecl.h b/lib/CodeGen/GlobalDecl.h
index 26dea402f4b6..c2f36d210bfc 100644
--- a/lib/CodeGen/GlobalDecl.h
+++ b/lib/CodeGen/GlobalDecl.h
@@ -15,9 +15,9 @@
#ifndef CLANG_CODEGEN_GLOBALDECL_H
#define CLANG_CODEGEN_GLOBALDECL_H
-#include "CGCXX.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/ABI.h"
namespace clang {
@@ -81,6 +81,12 @@ public:
GD.Value.setFromOpaqueValue(P);
return GD;
}
+
+ GlobalDecl getWithDecl(const Decl *D) {
+ GlobalDecl Result(*this);
+ Result.Value.setPointer(D);
+ return Result;
+ }
};
} // end namespace CodeGen
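
getWithDecl replaces only the declaration pointer and leaves the ctor/dtor
kind packed in the low bits untouched. A hedged usage sketch (hypothetical
variables):

    // Retarget a GlobalDecl at the canonical declaration without losing
    // the constructor kind it carries.
    GlobalDecl GD(Ctor, Ctor_Complete);
    GlobalDecl Canon = GD.getWithDecl(Ctor->getCanonicalDecl());
    // Canon.getDecl() is the canonical decl; Canon.getCtorType() is
    // still Ctor_Complete.
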
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index eefc530ccf18..95654a33a125 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -22,7 +22,7 @@
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
+#include <clang/AST/Mangle.h>
#include <clang/AST/Type.h>
#include <llvm/Target/TargetData.h>
#include <llvm/Value.h>
@@ -35,7 +35,6 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI {
private:
const llvm::IntegerType *PtrDiffTy;
protected:
- CodeGen::MangleContext MangleCtx;
bool IsARM;
// It's a little silly for us to cache this.
@@ -48,16 +47,13 @@ protected:
return PtrDiffTy;
}
- bool NeedsArrayCookie(QualType ElementType);
+ bool NeedsArrayCookie(const CXXNewExpr *expr);
+ bool NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType);
public:
ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
- CGCXXABI(CGM), PtrDiffTy(0), MangleCtx(getContext(), CGM.getDiags()),
- IsARM(IsARM) { }
-
- CodeGen::MangleContext &getMangleContext() {
- return MangleCtx;
- }
+ CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
bool isZeroInitializable(const MemberPointerType *MPT);
@@ -83,7 +79,8 @@ public:
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
- llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -111,14 +108,19 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- CharUnits GetArrayCookieSize(QualType ElementType);
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -143,12 +145,14 @@ public:
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
- CharUnits GetArrayCookieSize(QualType ElementType);
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
@@ -352,11 +356,11 @@ ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
bool DerivedToBase =
E->getCastKind() == CK_DerivedToBaseMemberPointer;
- const CXXRecordDecl *BaseDecl, *DerivedDecl;
+ const CXXRecordDecl *DerivedDecl;
if (DerivedToBase)
- DerivedDecl = SrcDecl, BaseDecl = DestDecl;
+ DerivedDecl = SrcDecl;
else
- BaseDecl = SrcDecl, DerivedDecl = DestDecl;
+ DerivedDecl = DestDecl;
llvm::Constant *Adj =
CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
@@ -490,21 +494,13 @@ ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
/*Packed=*/false);
}
-llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+llvm::Constant *
+ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
// Itanium C++ ABI 2.3:
// A pointer to data member is an offset from the base address of
// the class object containing it, represented as a ptrdiff_t
-
- QualType ClassType = getContext().getTypeDeclType(FD->getParent());
- const llvm::StructType *ClassLTy =
- cast<llvm::StructType>(CGM.getTypes().ConvertType(ClassType));
-
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(FD->getParent());
- unsigned FieldNo = RL.getLLVMFieldNo(FD);
- uint64_t Offset =
- CGM.getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
-
- return llvm::ConstantInt::get(getPtrDiffTy(), Offset);
+ return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
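
The offset rule quoted from the Itanium ABI is easy to see in a concrete case
(illustrative, not from the patch):

    struct S { int a; int b; };
    int S::*pm = &S::b;  // lowers to the ptrdiff_t constant 4 (4-byte int);
                         // &S::a would be 0, and the null member pointer is
                         // encoded as -1 so it stays distinct from offset 0.
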
@@ -799,67 +795,49 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
/************************** Array allocation cookies **************************/
-bool ItaniumCXXABI::NeedsArrayCookie(QualType ElementType) {
- ElementType = getContext().getBaseElementType(ElementType);
- const RecordType *RT = ElementType->getAs<RecordType>();
- if (!RT) return false;
-
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- // If the class has a non-trivial destructor, it always needs a cookie.
- if (!RD->hasTrivialDestructor()) return true;
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ expr->getAllocatedType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
// If the class's usual deallocation function takes two arguments,
- // it needs a cookie. Otherwise we don't need a cookie.
- const CXXMethodDecl *UsualDeallocationFunction = 0;
-
- // Usual deallocation functions of this form are always found on the
- // class.
- //
- // FIXME: what exactly is this code supposed to do if there's an
- // ambiguity? That's possible with using declarations.
- DeclarationName OpName =
- getContext().DeclarationNames.getCXXOperatorName(OO_Array_Delete);
- DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = RD->lookup(OpName); Op != OpEnd; ++Op) {
- const CXXMethodDecl *Delete =
- cast<CXXMethodDecl>((*Op)->getUnderlyingDecl());
-
- if (Delete->isUsualDeallocationFunction()) {
- UsualDeallocationFunction = Delete;
- break;
- }
- }
-
- // No usual deallocation function, we don't need a cookie.
- if (!UsualDeallocationFunction)
- return false;
-
- // The usual deallocation function doesn't take a size_t argument,
- // so we don't need a cookie.
- if (UsualDeallocationFunction->getNumParams() == 1)
- return false;
-
- assert(UsualDeallocationFunction->getNumParams() == 2 &&
- "Unexpected deallocation function type!");
- return true;
-}
-
-CharUnits ItaniumCXXABI::GetArrayCookieSize(QualType ElementType) {
- if (!NeedsArrayCookie(ElementType))
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ elementType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
return CharUnits::Zero();
- // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
+ // Padding is the maximum of sizeof(size_t) and alignof(elementType)
ASTContext &Ctx = getContext();
return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
- Ctx.getTypeAlignInChars(ElementType));
+ Ctx.getTypeAlignInChars(expr->getAllocatedType()));
}
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(ElementType));
+ assert(NeedsArrayCookie(expr));
unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
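
As an aside on the cookie initialized here (Itanium layout, not introduced by
this patch): the element count is stored immediately before the array, padded
so the array itself keeps its required alignment.

    // What 'new T[n]' allocates when a cookie is needed:
    //
    //   |<-------- cookie -------->|
    //   | padding     | size_t n   | T[0] T[1] ... T[n-1]
    //   ^ operator new[] result      ^ pointer produced by the new-expression
    //
    // Cookie size is max(sizeof(size_t), alignof(T)), matching
    // GetArrayCookieSize above; delete[] reads n back out to destroy the
    // elements.
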
@@ -892,6 +870,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType,
llvm::Value *&NumElements,
llvm::Value *&AllocPtr,
@@ -901,7 +880,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(ElementType)) {
+ if (!NeedsArrayCookie(expr, ElementType)) {
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
NumElements = 0;
CookieSize = CharUnits::Zero();
@@ -932,8 +911,8 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
-CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
- if (!NeedsArrayCookie(ElementType))
+CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
return CharUnits::Zero();
// On ARM, the cookie is always:
@@ -949,8 +928,9 @@ CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(ElementType));
+ assert(NeedsArrayCookie(expr));
// NewPtr is a char*.
@@ -983,6 +963,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType,
llvm::Value *&NumElements,
llvm::Value *&AllocPtr,
@@ -992,7 +973,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(ElementType)) {
+ if (!NeedsArrayCookie(expr, ElementType)) {
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
NumElements = 0;
CookieSize = CharUnits::Zero();
@@ -1005,7 +986,6 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
// The cookie size is always 2 * sizeof(size_t).
CookieSize = 2 * SizeSize;
- CharUnits NumElementsOffset = CookieSize - SizeSize;
// The allocated pointer is the input ptr, minus that amount.
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
@@ -1021,3 +1001,168 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
+/*********************** Static local initialization **************************/
+
+static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // int __cxa_guard_acquire(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_release(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_abort(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+}
+
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+/// The ARM code here follows the Itanium code closely enough that we
+/// just special-case it at particular places.
+void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // We only need to use thread-safe statics for local variables;
+ // global initialization is always single-threaded.
+ bool ThreadsafeStatics = (getContext().getLangOptions().ThreadsafeStatics &&
+ D.isLocalVarDecl());
+
+ // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
+ const llvm::IntegerType *GuardTy
+ = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+ const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
+
+ // Create the guard variable.
+ llvm::SmallString<256> GuardVName;
+ llvm::raw_svector_ostream Out(GuardVName);
+ getMangleContext().mangleItaniumGuardVariable(&D, Out);
+ Out.flush();
+
+ // Just absorb linkage and visibility from the variable.
+ llvm::GlobalVariable *GuardVariable =
+ new llvm::GlobalVariable(CGM.getModule(), GuardTy,
+ false, GV->getLinkage(),
+ llvm::ConstantInt::get(GuardTy, 0),
+ GuardVName.str());
+ GuardVariable->setVisibility(GV->getVisibility());
+
+ // Test whether the variable has completed initialization.
+ llvm::Value *IsInitialized;
+
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+ // guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ if (IsARM) {
+ llvm::Value *V = Builder.CreateLoad(GuardVariable);
+ V = Builder.CreateAnd(V, Builder.getInt32(1));
+ IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ // Itanium C++ ABI 3.3.2:
+ // The following is pseudo-code showing how these functions can be used:
+ // if (obj_guard.first_byte == 0) {
+ // if ( __cxa_guard_acquire (&obj_guard) ) {
+ // try {
+ // ... initialize the object ...;
+ // } catch (...) {
+ // __cxa_guard_abort (&obj_guard);
+ // throw;
+ // }
+ // ... queue object destructor with __cxa_atexit() ...;
+ // __cxa_guard_release (&obj_guard);
+ // }
+ // }
+ } else {
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy = Builder.getInt8PtrTy();
+ llvm::Value *V =
+ Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
+
+ IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+ }
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+ Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+
+ CGF.EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ if (ThreadsafeStatics) {
+ // Call __cxa_guard_acquire.
+ llvm::Value *V
+ = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
+
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
+
+ CGF.EmitBlock(InitBlock);
+ }
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, GV);
+
+ if (ThreadsafeStatics) {
+ // Pop the guard-abort cleanup if we pushed one.
+ CGF.PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
+ } else {
+ Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
+ }
+
+ CGF.EmitBlock(EndBlock);
+}
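
Tying the emission above back to source: a function-local static with a
dynamic initializer is what reaches EmitGuardedInit. With thread-safe statics
enabled, the generated code follows the __cxa_guard_* protocol quoted in the
comments; under -fno-threadsafe-statics only the guard load and the final
store remain. Illustrative source, with a hypothetical initializer:

    int init();  // hypothetical non-constant initializer

    int &get() {
      // Emitted as: load the guard; if unset, __cxa_guard_acquire, run
      // init(), queue the destructor if the type has one, then
      // __cxa_guard_release (with __cxa_guard_abort on the unwind path).
      static int x = init();
      return x;
    }
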
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
deleted file mode 100644
index e1988743b7f9..000000000000
--- a/lib/CodeGen/Mangle.cpp
+++ /dev/null
@@ -1,2515 +0,0 @@
-//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Implements C++ name mangling according to the Itanium C++ ABI,
-// which is used in GCC 3.2 and newer (and many compilers that are
-// ABI-compatible with GCC):
-//
-// http://www.codesourcery.com/public/cxx-abi/abi.html
-//
-//===----------------------------------------------------------------------===//
-#include "Mangle.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/DeclTemplate.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "CGVTables.h"
-
-#define MANGLE_CHECKER 0
-
-#if MANGLE_CHECKER
-#include <cxxabi.h>
-#endif
-
-using namespace clang;
-using namespace CodeGen;
-
-MiscNameMangler::MiscNameMangler(MangleContext &C,
- llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res) { }
-
-void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) {
- // Mangle the context of the block.
- // FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
- // desired. Come up with a better mangling scheme.
- const DeclContext *DC = BD->getDeclContext();
- while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
- DC = DC->getParent();
- if (DC->isFunctionOrMethod()) {
- Out << "__";
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else {
- const NamedDecl *ND = cast<NamedDecl>(DC);
- if (IdentifierInfo *II = ND->getIdentifier())
- Out << II->getName();
- else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) {
- llvm::SmallString<64> Buffer;
- Context.mangleCXXDtor(D, GD.getDtorType(), Buffer);
- Out << Buffer;
- }
- else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) {
- llvm::SmallString<64> Buffer;
- Context.mangleCXXCtor(D, GD.getCtorType(), Buffer);
- Out << Buffer;
- }
- else {
- // FIXME: We were doing a mangleUnqualifiedName() before, but that's
- // a private member of a class that will soon itself be private to the
- // Itanium C++ ABI object. What should we do now? Right now, I'm just
- // calling the mangleName() method on the MangleContext; is there a
- // better way?
- llvm::SmallString<64> Buffer;
- Context.mangleName(ND, Buffer);
- Out << Buffer;
- }
- }
- Out << "_block_invoke_" << Context.getBlockId(BD, true);
- } else {
- Out << "__block_global_" << Context.getBlockId(BD, false);
- }
-}
-
-void MiscNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Name;
- llvm::raw_svector_ostream OS(Name);
-
- const ObjCContainerDecl *CD =
- dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
- assert (CD && "Missing container decl in GetNameForMethod");
- OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
- if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
- OS << '(' << CID << ')';
- OS << ' ' << MD->getSelector().getAsString() << ']';
-
- Out << OS.str().size() << OS.str();
-}
-
-namespace {
-
-static const DeclContext *GetLocalClassFunctionDeclContext(
- const DeclContext *DC) {
- if (isa<CXXRecordDecl>(DC)) {
- while (!DC->isNamespace() && !DC->isTranslationUnit() &&
- !isa<FunctionDecl>(DC))
- DC = DC->getParent();
- if (isa<FunctionDecl>(DC))
- return DC;
- }
- return 0;
-}
-
-static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
- assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
- "Passed in decl is not a ctor or dtor!");
-
- if (const TemplateDecl *TD = MD->getPrimaryTemplate()) {
- MD = cast<CXXMethodDecl>(TD->getTemplatedDecl());
-
- assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
- "Templated decl is not a ctor or dtor!");
- }
-
- return MD;
-}
-
-static const unsigned UnknownArity = ~0U;
-
-/// CXXNameMangler - Manage the mangling of a single name.
-class CXXNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
-
- const CXXMethodDecl *Structor;
- unsigned StructorType;
-
- /// SeqID - The next subsitution sequence number.
- unsigned SeqID;
-
- llvm::DenseMap<uintptr_t, unsigned> Substitutions;
-
- ASTContext &getASTContext() const { return Context.getASTContext(); }
-
-public:
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { }
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
- const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
- SeqID(0) { }
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
- const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
- SeqID(0) { }
-
-#if MANGLE_CHECKER
- ~CXXNameMangler() {
- if (Out.str()[0] == '\01')
- return;
-
- int status = 0;
- char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
- assert(status == 0 && "Could not demangle mangled name!");
- free(result);
- }
-#endif
- llvm::raw_svector_ostream &getStream() { return Out; }
-
- void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
- void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
- void mangleNumber(const llvm::APSInt &I);
- void mangleNumber(int64_t Number);
- void mangleFloat(const llvm::APFloat &F);
- void mangleFunctionEncoding(const FunctionDecl *FD);
- void mangleName(const NamedDecl *ND);
- void mangleType(QualType T);
- void mangleNameOrStandardSubstitution(const NamedDecl *ND);
-
-private:
- bool mangleSubstitution(const NamedDecl *ND);
- bool mangleSubstitution(QualType T);
- bool mangleSubstitution(TemplateName Template);
- bool mangleSubstitution(uintptr_t Ptr);
-
- bool mangleStandardSubstitution(const NamedDecl *ND);
-
- void addSubstitution(const NamedDecl *ND) {
- ND = cast<NamedDecl>(ND->getCanonicalDecl());
-
- addSubstitution(reinterpret_cast<uintptr_t>(ND));
- }
- void addSubstitution(QualType T);
- void addSubstitution(TemplateName Template);
- void addSubstitution(uintptr_t Ptr);
-
- void mangleUnresolvedScope(NestedNameSpecifier *Qualifier);
- void mangleUnresolvedName(NestedNameSpecifier *Qualifier,
- DeclarationName Name,
- unsigned KnownArity = UnknownArity);
-
- void mangleName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void mangleUnqualifiedName(const NamedDecl *ND) {
- mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
- }
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
- unsigned KnownArity);
- void mangleUnscopedName(const NamedDecl *ND);
- void mangleUnscopedTemplateName(const TemplateDecl *ND);
- void mangleUnscopedTemplateName(TemplateName);
- void mangleSourceName(const IdentifierInfo *II);
- void mangleLocalName(const NamedDecl *ND);
- void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
- bool NoFunction=false);
- void mangleNestedName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void manglePrefix(const DeclContext *DC, bool NoFunction=false);
- void mangleTemplatePrefix(const TemplateDecl *ND);
- void mangleTemplatePrefix(TemplateName Template);
- void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
- void mangleQualifiers(Qualifiers Quals);
-
- void mangleObjCMethodName(const ObjCMethodDecl *MD);
-
- // Declare manglers for every type class.
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
-#include "clang/AST/TypeNodes.def"
-
- void mangleType(const TagType*);
- void mangleType(TemplateName);
- void mangleBareFunctionType(const FunctionType *T,
- bool MangleReturnType);
-
- void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
- void mangleMemberExpr(const Expr *Base, bool IsArrow,
- NestedNameSpecifier *Qualifier,
- DeclarationName Name,
- unsigned KnownArity);
- void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
- void mangleCXXCtorType(CXXCtorType T);
- void mangleCXXDtorType(CXXDtorType T);
-
- void mangleTemplateArgs(const ExplicitTemplateArgumentList &TemplateArgs);
- void mangleTemplateArgs(TemplateName Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgumentList &AL);
- void mangleTemplateArg(const NamedDecl *P, const TemplateArgument &A);
-
- void mangleTemplateParameter(unsigned Index);
-};
-}
-
-static bool isInCLinkageSpecification(const Decl *D) {
- D = D->getCanonicalDecl();
- for (const DeclContext *DC = D->getDeclContext();
- !DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
- return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
- }
-
- return false;
-}
-
-bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
- // In C, functions with no attributes never need to be mangled. Fastpath them.
- if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
- return false;
-
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (D->hasAttr<AsmLabelAttr>())
- return true;
-
- // Clang's "overloadable" attribute extension to C/C++ implies name mangling
- // (always) as does passing a C++ member function and a function
- // whose name is not a simple identifier.
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
- !FD->getDeclName().isIdentifier()))
- return true;
-
- // Otherwise, no mangling is done outside C++ mode.
- if (!getASTContext().getLangOptions().CPlusPlus)
- return false;
-
- // Variables at global scope with non-internal linkage are not mangled
- if (!FD) {
- const DeclContext *DC = D->getDeclContext();
- // Check for extern variable declared locally.
- if (DC->isFunctionOrMethod() && D->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
- if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
- return false;
- }
-
- // Class members are always mangled.
- if (D->getDeclContext()->isRecord())
- return true;
-
- // C functions and "main" are not mangled.
- if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
- return false;
-
- return true;
-}
-
-void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
- // If we have an asm name, then we use it as the mangling.
- Out << '\01'; // LLVM IR Marker for __asm("foo")
- Out << ALA->getLabel();
- return;
- }
-
- // <mangled-name> ::= _Z <encoding>
- // ::= <data name>
- // ::= <special-name>
- Out << Prefix;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- mangleName(VD);
- else
- mangleName(cast<FieldDecl>(D));
-}
-
-void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
- // <encoding> ::= <function name> <bare-function-type>
- mangleName(FD);
-
- // Don't mangle in the type if this isn't a decl we should typically mangle.
- if (!Context.shouldMangleDeclName(FD))
- return;
-
- // Whether the mangling of a function type includes the return type depends on
- // the context and the nature of the function. The rules for deciding whether
- // the return type is included are:
- //
- // 1. Template functions (names or types) have return types encoded, with
- // the exceptions listed below.
- // 2. Function types not appearing as part of a function name mangling,
- // e.g. parameters, pointer types, etc., have return type encoded, with the
- // exceptions listed below.
- // 3. Non-template function names do not have return types encoded.
- //
- // The exceptions mentioned in (1) and (2) above, for which the return type is
- // never included, are
- // 1. Constructors.
- // 2. Destructors.
- // 3. Conversion operator functions, e.g. operator int.
- bool MangleReturnType = false;
- if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
- if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
- isa<CXXConversionDecl>(FD)))
- MangleReturnType = true;
-
- // Mangle the type of the primary template.
- FD = PrimaryTemplate->getTemplatedDecl();
- }
-
- // Do the canonicalization out here because parameter types can
- // undergo additional canonicalization (e.g. array decay).
- FunctionType *FT = cast<FunctionType>(Context.getASTContext()
- .getCanonicalType(FD->getType()));
-
- mangleBareFunctionType(FT, MangleReturnType);
-}
-
-static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
- while (isa<LinkageSpecDecl>(DC)) {
- DC = DC->getParent();
- }
-
- return DC;
-}
-
-/// isStd - Return whether a given namespace is the 'std' namespace.
-static bool isStd(const NamespaceDecl *NS) {
- if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit())
- return false;
-
- const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
- return II && II->isStr("std");
-}
-
-// isStdNamespace - Return whether a given decl context is a toplevel 'std'
-// namespace.
-static bool isStdNamespace(const DeclContext *DC) {
- if (!DC->isNamespace())
- return false;
-
- return isStd(cast<NamespaceDecl>(DC));
-}
-
-static const TemplateDecl *
-isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
- // Check if we have a function template.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
- if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
- TemplateArgs = FD->getTemplateSpecializationArgs();
- return TD;
- }
- }
-
- // Check if we have a class template.
- if (const ClassTemplateSpecializationDecl *Spec =
- dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
- TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
- }
-
- return 0;
-}
-
-void CXXNameMangler::mangleName(const NamedDecl *ND) {
- // <name> ::= <nested-name>
- // ::= <unscoped-name>
- // ::= <unscoped-template-name> <template-args>
- // ::= <local-name>
- //
- const DeclContext *DC = ND->getDeclContext();
-
- if (GetLocalClassFunctionDeclContext(DC)) {
- mangleLocalName(ND);
- return;
- }
-
- // If this is an extern variable declared locally, the relevant DeclContext
- // is that of the containing namespace, or the translation unit.
- if (isa<FunctionDecl>(DC) && ND->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
-
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
-
- if (DC->isTranslationUnit() || isStdNamespace(DC)) {
- // Check if we have a template.
- const TemplateArgumentList *TemplateArgs = 0;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
- mangleUnscopedTemplateName(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
- return;
- }
-
- mangleUnscopedName(ND);
- return;
- }
-
- if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
- mangleLocalName(ND);
- return;
- }
-
- mangleNestedName(ND, DC);
-}
-void CXXNameMangler::mangleName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- const DeclContext *DC = IgnoreLinkageSpecDecls(TD->getDeclContext());
-
- if (DC->isTranslationUnit() || isStdNamespace(DC)) {
- mangleUnscopedTemplateName(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
- } else {
- mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
- }
-}
-
-void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
- // <unscoped-name> ::= <unqualified-name>
- // ::= St <unqualified-name> # ::std::
- if (isStdNamespace(ND->getDeclContext()))
- Out << "St";
-
- mangleUnqualifiedName(ND);
-}
-
-void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
- // <unscoped-template-name> ::= <unscoped-name>
- // ::= <substitution>
- if (mangleSubstitution(ND))
- return;
-
- // <template-template-param> ::= <template-param>
- if (const TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(ND)) {
- mangleTemplateParameter(TTP->getIndex());
- return;
- }
-
- mangleUnscopedName(ND->getTemplatedDecl());
- addSubstitution(ND);
-}
-
-void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
- // <unscoped-template-name> ::= <unscoped-name>
- // ::= <substitution>
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleUnscopedTemplateName(TD);
-
- if (mangleSubstitution(Template))
- return;
-
- // FIXME: How to cope with operators here?
- DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
- assert(Dependent && "Not a dependent template name?");
- if (!Dependent->isIdentifier()) {
- // FIXME: We can't possibly know the arity of the operator here!
- Diagnostic &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot mangle dependent operator name");
- Diags.Report(FullSourceLoc(), DiagID);
- return;
- }
-
- mangleSourceName(Dependent->getIdentifier());
- addSubstitution(Template);
-}
-
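-// Illustrative example: per the Itanium ABI, a floating literal is written
-// as the hex digits of its bit pattern, so the double 1.0 (bit pattern
-// 0x3FF0000000000000) comes out as "3ff0000000000000".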
-void CXXNameMangler::mangleFloat(const llvm::APFloat &F) {
- // TODO: avoid this copy with careful stream management.
- llvm::SmallString<20> Buffer;
- F.bitcastToAPInt().toString(Buffer, 16, false);
- Out.write(Buffer.data(), Buffer.size());
-}
-
-void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
- if (Value.isSigned() && Value.isNegative()) {
- Out << 'n';
- Value.abs().print(Out, true);
- } else
- Value.print(Out, Value.isSigned());
-}
-
-void CXXNameMangler::mangleNumber(int64_t Number) {
- // <number> ::= [n] <non-negative decimal integer>
- if (Number < 0) {
- Out << 'n';
- Number = -Number;
- }
-
- Out << Number;
-}
-
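-// Illustrative example (Derived is a hypothetical class): a thunk adjusting
-// 'this' by -16 bytes with no virtual adjustment gets the call offset
-// "hn16_", yielding a thunk name such as _ZThn16_N7Derived1fEv.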
-void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
- // <call-offset> ::= h <nv-offset> _
- // ::= v <v-offset> _
- // <nv-offset> ::= <offset number> # non-virtual base override
- // <v-offset> ::= <offset number> _ <virtual offset number>
- // # virtual base override, with vcall offset
- if (!Virtual) {
- Out << 'h';
- mangleNumber(NonVirtual);
- Out << '_';
- return;
- }
-
- Out << 'v';
- mangleNumber(NonVirtual);
- Out << '_';
- mangleNumber(Virtual);
- Out << '_';
-}
-
-void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
- Qualifier = getASTContext().getCanonicalNestedNameSpecifier(Qualifier);
- switch (Qualifier->getKind()) {
- case NestedNameSpecifier::Global:
- // nothing
- break;
- case NestedNameSpecifier::Namespace:
- mangleName(Qualifier->getAsNamespace());
- break;
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::TypeSpecWithTemplate: {
- const Type *QTy = Qualifier->getAsType();
-
- if (const TemplateSpecializationType *TST =
- dyn_cast<TemplateSpecializationType>(QTy)) {
- if (!mangleSubstitution(QualType(TST, 0))) {
- mangleTemplatePrefix(TST->getTemplateName());
-
- // FIXME: GCC does not appear to mangle the template arguments when
- // the template in question is a dependent template name. Should we
- // emulate that badness?
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
- TST->getNumArgs());
- addSubstitution(QualType(TST, 0));
- }
- } else {
-      // We use the QualType overload of mangleType() here because it handles
-      // substitutions.
- mangleType(QualType(QTy, 0));
- }
- }
- break;
- case NestedNameSpecifier::Identifier:
- // Member expressions can have these without prefixes.
- if (Qualifier->getPrefix())
- mangleUnresolvedScope(Qualifier->getPrefix());
- mangleSourceName(Qualifier->getAsIdentifier());
- break;
- }
-}
-
-/// Mangles a name which was not resolved to a specific entity.
-void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
- DeclarationName Name,
- unsigned KnownArity) {
- if (Qualifier)
- mangleUnresolvedScope(Qualifier);
- // FIXME: ambiguity of unqualified lookup with ::
-
- mangleUnqualifiedName(0, Name, KnownArity);
-}
-
-static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
- assert(RD->isAnonymousStructOrUnion() &&
- "Expected anonymous struct or union!");
-
- for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I) {
- const FieldDecl *FD = *I;
-
- if (FD->getIdentifier())
- return FD;
-
- if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
- if (const FieldDecl *NamedDataMember =
- FindFirstNamedDataMember(RT->getDecl()))
- return NamedDataMember;
- }
- }
-
- // We didn't find a named data member.
- return 0;
-}
-
-void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
- DeclarationName Name,
- unsigned KnownArity) {
- // <unqualified-name> ::= <operator-name>
- // ::= <ctor-dtor-name>
- // ::= <source-name>
- switch (Name.getNameKind()) {
- case DeclarationName::Identifier: {
- if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
- // We must avoid conflicts between internally- and externally-
- // linked variable declaration names in the same TU.
- // This naming convention is the same as that followed by GCC, though it
- // shouldn't actually matter.
- if (ND && isa<VarDecl>(ND) && ND->getLinkage() == InternalLinkage &&
- ND->getDeclContext()->isFileContext())
- Out << 'L';
-
- mangleSourceName(II);
- break;
- }
-
- // Otherwise, an anonymous entity. We must have a declaration.
- assert(ND && "mangling empty name without declaration");
-
- if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
- if (NS->isAnonymousNamespace()) {
- // This is how gcc mangles these names.
- Out << "12_GLOBAL__N_1";
- break;
- }
- }
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
- // We must have an anonymous union or struct declaration.
- const RecordDecl *RD =
- cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
-
- // Itanium C++ ABI 5.1.2:
- //
- // For the purposes of mangling, the name of an anonymous union is
- // considered to be the name of the first named data member found by a
- // pre-order, depth-first, declaration-order walk of the data members of
- // the anonymous union. If there is no such data member (i.e., if all of
- // the data members in the union are unnamed), then there is no way for
- // a program to refer to the anonymous union, and there is therefore no
- // need to mangle its name.
- const FieldDecl *FD = FindFirstNamedDataMember(RD);
-
- // It's actually possible for various reasons for us to get here
- // with an empty anonymous struct / union. Fortunately, it
- // doesn't really matter what name we generate.
- if (!FD) break;
- assert(FD->getIdentifier() && "Data member name isn't an identifier!");
-
- mangleSourceName(FD->getIdentifier());
- break;
- }
-
- // We must have an anonymous struct.
- const TagDecl *TD = cast<TagDecl>(ND);
- if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
- assert(TD->getDeclContext() == D->getDeclContext() &&
- "Typedef should not be in another decl context!");
- assert(D->getDeclName().getAsIdentifierInfo() &&
- "Typedef was not named!");
- mangleSourceName(D->getDeclName().getAsIdentifierInfo());
- break;
- }
-
- // Get a unique id for the anonymous struct.
- uint64_t AnonStructId = Context.getAnonymousStructId(TD);
-
- // Mangle it as a source name in the form
- // [n] $_<id>
- // where n is the length of the string.
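-  // For illustration: the first anonymous struct in a translation unit gets
-  // id 0 and is emitted as "3$_0".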
- llvm::SmallString<8> Str;
- Str += "$_";
- Str += llvm::utostr(AnonStructId);
-
- Out << Str.size();
- Out << Str.str();
- break;
- }
-
- case DeclarationName::ObjCZeroArgSelector:
- case DeclarationName::ObjCOneArgSelector:
- case DeclarationName::ObjCMultiArgSelector:
- assert(false && "Can't mangle Objective-C selector names here!");
- break;
-
- case DeclarationName::CXXConstructorName:
- if (ND == Structor)
- // If the named decl is the C++ constructor we're mangling, use the type
- // we were given.
- mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
- else
- // Otherwise, use the complete constructor name. This is relevant if a
- // class with a constructor is declared within a constructor.
- mangleCXXCtorType(Ctor_Complete);
- break;
-
- case DeclarationName::CXXDestructorName:
- if (ND == Structor)
- // If the named decl is the C++ destructor we're mangling, use the type we
- // were given.
- mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
- else
- // Otherwise, use the complete destructor name. This is relevant if a
- // class with a destructor is declared within a destructor.
- mangleCXXDtorType(Dtor_Complete);
- break;
-
- case DeclarationName::CXXConversionFunctionName:
- // <operator-name> ::= cv <type> # (cast)
- Out << "cv";
- mangleType(Context.getASTContext().getCanonicalType(Name.getCXXNameType()));
- break;
-
- case DeclarationName::CXXOperatorName: {
- unsigned Arity;
- if (ND) {
- Arity = cast<FunctionDecl>(ND)->getNumParams();
-
- // If we have a C++ member function, we need to include the 'this' pointer.
- // FIXME: This does not make sense for operators that are static, but their
- // names stay the same regardless of the arity (operator new for instance).
- if (isa<CXXMethodDecl>(ND))
- Arity++;
- } else
- Arity = KnownArity;
-
- mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
- break;
- }
-
- case DeclarationName::CXXLiteralOperatorName:
- // FIXME: This mangling is not yet official.
- Out << "li";
- mangleSourceName(Name.getCXXLiteralIdentifier());
- break;
-
- case DeclarationName::CXXUsingDirective:
- assert(false && "Can't mangle a using directive name!");
- break;
- }
-}
-
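-// Illustrative example: the identifier "foo" is emitted as "3foo", its
-// length followed by its spelling.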
-void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
- // <source-name> ::= <positive length number> <identifier>
- // <number> ::= [n] <non-negative decimal integer>
- // <identifier> ::= <unqualified source code identifier>
- Out << II->getLength() << II->getName();
-}
-
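-// Illustrative examples (Foo and bar are hypothetical): "void Foo::bar()"
-// mangles its name as N3Foo3barE, giving _ZN3Foo3barEv; a const member
-// function places the CV-qualifier first: _ZNK3Foo3barEv.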
-void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
- const DeclContext *DC,
- bool NoFunction) {
- // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
- // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
-
- Out << 'N';
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
- mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
-
- // Check if we have a template.
- const TemplateArgumentList *TemplateArgs = 0;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
- mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
- }
- else {
- manglePrefix(DC, NoFunction);
- mangleUnqualifiedName(ND);
- }
-
- Out << 'E';
-}
-void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
-
- Out << 'N';
-
- mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
-
- Out << 'E';
-}
-
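-// Illustrative example (names hypothetical): a function-local static "x"
-// inside "void f()" mangles as _ZZ1fvE1x: Z, the encoding of f, E, then
-// the entity name.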
-void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
-  // <local-name> ::= Z <function encoding> E <entity name> [<discriminator>]
-  //              ::= Z <function encoding> E s [<discriminator>]
-  // <discriminator> ::= _ <non-negative number>
- const DeclContext *DC = ND->getDeclContext();
- Out << 'Z';
-
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
- mangleObjCMethodName(MD);
- }
- else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
- mangleFunctionEncoding(cast<FunctionDecl>(CDC));
- Out << 'E';
- mangleNestedName(ND, DC, true /*NoFunction*/);
-
- // FIXME. This still does not cover all cases.
- unsigned disc;
- if (Context.getNextDiscriminator(ND, disc)) {
- if (disc < 10)
- Out << '_' << disc;
- else
- Out << "__" << disc << '_';
- }
-
- return;
- }
- else
- mangleFunctionEncoding(cast<FunctionDecl>(DC));
-
- Out << 'E';
- mangleUnqualifiedName(ND);
-}
-
-void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
- // <prefix> ::= <prefix> <unqualified-name>
- // ::= <template-prefix> <template-args>
- // ::= <template-param>
- // ::= # empty
- // ::= <substitution>
-
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
-
- if (DC->isTranslationUnit())
- return;
-
- if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
- manglePrefix(DC->getParent(), NoFunction);
- llvm::SmallString<64> Name;
- Context.mangleBlock(GlobalDecl(), Block, Name);
- Out << Name.size() << Name;
- return;
- }
-
- if (mangleSubstitution(cast<NamedDecl>(DC)))
- return;
-
- // Check if we have a template.
- const TemplateArgumentList *TemplateArgs = 0;
- if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
- mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
- }
- else if(NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
- return;
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else {
- manglePrefix(DC->getParent(), NoFunction);
- mangleUnqualifiedName(cast<NamedDecl>(DC));
- }
-
- addSubstitution(cast<NamedDecl>(DC));
-}
-
-void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
- // <template-prefix> ::= <prefix> <template unqualified-name>
- // ::= <template-param>
- // ::= <substitution>
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleTemplatePrefix(TD);
-
- if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
- mangleUnresolvedScope(Qualified->getQualifier());
-
- if (OverloadedTemplateStorage *Overloaded
- = Template.getAsOverloadedTemplate()) {
- mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
- UnknownArity);
- return;
- }
-
- DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
- assert(Dependent && "Unknown template name kind?");
- mangleUnresolvedScope(Dependent->getQualifier());
- mangleUnscopedTemplateName(Template);
-}
-
-void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
- // <template-prefix> ::= <prefix> <template unqualified-name>
- // ::= <template-param>
- // ::= <substitution>
-  // <template-template-param> ::= <template-param>
-  //                           ::= <substitution>
-
- if (mangleSubstitution(ND))
- return;
-
- // <template-template-param> ::= <template-param>
- if (const TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(ND)) {
- mangleTemplateParameter(TTP->getIndex());
- return;
- }
-
- manglePrefix(ND->getDeclContext());
- mangleUnqualifiedName(ND->getTemplatedDecl());
- addSubstitution(ND);
-}
-
-/// Mangles a template name under the production <type>. Required for
-/// template template arguments.
-/// <type> ::= <class-enum-type>
-/// ::= <template-param>
-/// ::= <substitution>
-void CXXNameMangler::mangleType(TemplateName TN) {
- if (mangleSubstitution(TN))
- return;
-
- TemplateDecl *TD = 0;
-
- switch (TN.getKind()) {
- case TemplateName::QualifiedTemplate:
- TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
- goto HaveDecl;
-
- case TemplateName::Template:
- TD = TN.getAsTemplateDecl();
- goto HaveDecl;
-
- HaveDecl:
- if (isa<TemplateTemplateParmDecl>(TD))
- mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
- else
- mangleName(TD);
- break;
-
- case TemplateName::OverloadedTemplate:
- llvm_unreachable("can't mangle an overloaded template name as a <type>");
- break;
-
- case TemplateName::DependentTemplate: {
- const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
- assert(Dependent->isIdentifier());
-
- // <class-enum-type> ::= <name>
- // <name> ::= <nested-name>
- mangleUnresolvedScope(Dependent->getQualifier());
- mangleSourceName(Dependent->getIdentifier());
- break;
- }
-
- }
-
- addSubstitution(TN);
-}
-
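-// Illustrative examples (Foo is hypothetical): "bool operator<(Foo, Foo)"
-// mangles as _Zlt3FooS_, while a unary "Foo operator-(Foo)" selects "ng"
-// rather than the binary "mi".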
-void
-CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
- switch (OO) {
- // <operator-name> ::= nw # new
- case OO_New: Out << "nw"; break;
- // ::= na # new[]
- case OO_Array_New: Out << "na"; break;
- // ::= dl # delete
- case OO_Delete: Out << "dl"; break;
- // ::= da # delete[]
- case OO_Array_Delete: Out << "da"; break;
- // ::= ps # + (unary)
- // ::= pl # + (binary or unknown)
- case OO_Plus:
- Out << (Arity == 1? "ps" : "pl"); break;
- // ::= ng # - (unary)
- // ::= mi # - (binary or unknown)
- case OO_Minus:
- Out << (Arity == 1? "ng" : "mi"); break;
- // ::= ad # & (unary)
- // ::= an # & (binary or unknown)
- case OO_Amp:
- Out << (Arity == 1? "ad" : "an"); break;
- // ::= de # * (unary)
- // ::= ml # * (binary or unknown)
- case OO_Star:
- // Use binary when unknown.
- Out << (Arity == 1? "de" : "ml"); break;
- // ::= co # ~
- case OO_Tilde: Out << "co"; break;
- // ::= dv # /
- case OO_Slash: Out << "dv"; break;
- // ::= rm # %
- case OO_Percent: Out << "rm"; break;
- // ::= or # |
- case OO_Pipe: Out << "or"; break;
- // ::= eo # ^
- case OO_Caret: Out << "eo"; break;
- // ::= aS # =
- case OO_Equal: Out << "aS"; break;
- // ::= pL # +=
- case OO_PlusEqual: Out << "pL"; break;
- // ::= mI # -=
- case OO_MinusEqual: Out << "mI"; break;
- // ::= mL # *=
- case OO_StarEqual: Out << "mL"; break;
- // ::= dV # /=
- case OO_SlashEqual: Out << "dV"; break;
- // ::= rM # %=
- case OO_PercentEqual: Out << "rM"; break;
- // ::= aN # &=
- case OO_AmpEqual: Out << "aN"; break;
- // ::= oR # |=
- case OO_PipeEqual: Out << "oR"; break;
- // ::= eO # ^=
- case OO_CaretEqual: Out << "eO"; break;
- // ::= ls # <<
- case OO_LessLess: Out << "ls"; break;
- // ::= rs # >>
- case OO_GreaterGreater: Out << "rs"; break;
- // ::= lS # <<=
- case OO_LessLessEqual: Out << "lS"; break;
- // ::= rS # >>=
- case OO_GreaterGreaterEqual: Out << "rS"; break;
- // ::= eq # ==
- case OO_EqualEqual: Out << "eq"; break;
- // ::= ne # !=
- case OO_ExclaimEqual: Out << "ne"; break;
- // ::= lt # <
- case OO_Less: Out << "lt"; break;
- // ::= gt # >
- case OO_Greater: Out << "gt"; break;
- // ::= le # <=
- case OO_LessEqual: Out << "le"; break;
- // ::= ge # >=
- case OO_GreaterEqual: Out << "ge"; break;
- // ::= nt # !
- case OO_Exclaim: Out << "nt"; break;
- // ::= aa # &&
- case OO_AmpAmp: Out << "aa"; break;
- // ::= oo # ||
- case OO_PipePipe: Out << "oo"; break;
- // ::= pp # ++
- case OO_PlusPlus: Out << "pp"; break;
- // ::= mm # --
- case OO_MinusMinus: Out << "mm"; break;
- // ::= cm # ,
- case OO_Comma: Out << "cm"; break;
- // ::= pm # ->*
- case OO_ArrowStar: Out << "pm"; break;
- // ::= pt # ->
- case OO_Arrow: Out << "pt"; break;
- // ::= cl # ()
- case OO_Call: Out << "cl"; break;
- // ::= ix # []
- case OO_Subscript: Out << "ix"; break;
-
- // ::= qu # ?
- // The conditional operator can't be overloaded, but we still handle it when
- // mangling expressions.
- case OO_Conditional: Out << "qu"; break;
-
- case OO_None:
- case NUM_OVERLOADED_OPERATORS:
- assert(false && "Not an overloaded operator");
- break;
- }
-}
-
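-// Illustrative example: a parameter of type "const volatile int *" mangles
-// as PVKi, the qualifiers appearing in the fixed order r, V, K; the vendor
-// address-space extension below emits e.g. "U3AS1" for address space 1.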
-void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
- // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
- if (Quals.hasRestrict())
- Out << 'r';
- if (Quals.hasVolatile())
- Out << 'V';
- if (Quals.hasConst())
- Out << 'K';
-
- if (Quals.hasAddressSpace()) {
- // Extension:
- //
- // <type> ::= U <address-space-number>
- //
- // where <address-space-number> is a source name consisting of 'AS'
- // followed by the address space <number>.
- llvm::SmallString<64> ASString;
- ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
- Out << 'U' << ASString.size() << ASString;
- }
-
- // FIXME: For now, just drop all extension qualifiers on the floor.
-}
-
-void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Buffer;
- MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
- Out << Buffer;
-}
-
-void CXXNameMangler::mangleType(QualType T) {
- // Only operate on the canonical type!
- T = Context.getASTContext().getCanonicalType(T);
-
- bool IsSubstitutable = T.hasLocalQualifiers() || !isa<BuiltinType>(T);
- if (IsSubstitutable && mangleSubstitution(T))
- return;
-
- if (Qualifiers Quals = T.getLocalQualifiers()) {
- mangleQualifiers(Quals);
- // Recurse: even if the qualified type isn't yet substitutable,
- // the unqualified type might be.
- mangleType(T.getLocalUnqualifiedType());
- } else {
- switch (T->getTypeClass()) {
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT) \
- case Type::CLASS: \
- llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
- return;
-#define TYPE(CLASS, PARENT) \
- case Type::CLASS: \
- mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
- break;
-#include "clang/AST/TypeNodes.def"
- }
- }
-
- // Add the substitution.
- if (IsSubstitutable)
- addSubstitution(T);
-}
-
-void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
- if (!mangleStandardSubstitution(ND))
- mangleName(ND);
-}
-
-void CXXNameMangler::mangleType(const BuiltinType *T) {
- // <type> ::= <builtin-type>
- // <builtin-type> ::= v # void
- // ::= w # wchar_t
- // ::= b # bool
- // ::= c # char
- // ::= a # signed char
- // ::= h # unsigned char
- // ::= s # short
- // ::= t # unsigned short
- // ::= i # int
- // ::= j # unsigned int
- // ::= l # long
- // ::= m # unsigned long
- // ::= x # long long, __int64
- // ::= y # unsigned long long, __int64
- // ::= n # __int128
- // UNSUPPORTED: ::= o # unsigned __int128
- // ::= f # float
- // ::= d # double
- // ::= e # long double, __float80
- // UNSUPPORTED: ::= g # __float128
- // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
- // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
- // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
- // UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
- // ::= Di # char32_t
- // ::= Ds # char16_t
- // ::= u <source-name> # vendor extended type
- // From our point of view, std::nullptr_t is a builtin, but as far as mangling
- // is concerned, it's a type called std::nullptr_t.
- switch (T->getKind()) {
- case BuiltinType::Void: Out << 'v'; break;
- case BuiltinType::Bool: Out << 'b'; break;
- case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
- case BuiltinType::UChar: Out << 'h'; break;
- case BuiltinType::UShort: Out << 't'; break;
- case BuiltinType::UInt: Out << 'j'; break;
- case BuiltinType::ULong: Out << 'm'; break;
- case BuiltinType::ULongLong: Out << 'y'; break;
- case BuiltinType::UInt128: Out << 'o'; break;
- case BuiltinType::SChar: Out << 'a'; break;
- case BuiltinType::WChar: Out << 'w'; break;
- case BuiltinType::Char16: Out << "Ds"; break;
- case BuiltinType::Char32: Out << "Di"; break;
- case BuiltinType::Short: Out << 's'; break;
- case BuiltinType::Int: Out << 'i'; break;
- case BuiltinType::Long: Out << 'l'; break;
- case BuiltinType::LongLong: Out << 'x'; break;
- case BuiltinType::Int128: Out << 'n'; break;
- case BuiltinType::Float: Out << 'f'; break;
- case BuiltinType::Double: Out << 'd'; break;
- case BuiltinType::LongDouble: Out << 'e'; break;
- case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
-
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- assert(false &&
- "Overloaded and dependent types shouldn't get to name mangling");
- break;
- case BuiltinType::UndeducedAuto:
- assert(0 && "Should not see undeduced auto here");
- break;
- case BuiltinType::ObjCId: Out << "11objc_object"; break;
- case BuiltinType::ObjCClass: Out << "10objc_class"; break;
- case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
- }
-}
-
-// <type> ::= <function-type>
-// <function-type> ::= F [Y] <bare-function-type> E
-void CXXNameMangler::mangleType(const FunctionProtoType *T) {
- Out << 'F';
- // FIXME: We don't have enough information in the AST to produce the 'Y'
- // encoding for extern "C" function types.
- mangleBareFunctionType(T, /*MangleReturnType=*/true);
- Out << 'E';
-}
-void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
- llvm_unreachable("Can't mangle K&R function prototypes");
-}
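-// Illustrative examples: an empty parameter list is emitted as "v" (so
-// "void f()" is _Z1fv), and a variadic "void f(char, ...)" emits its
-// parameters as "cz" (_Z1fcz).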
-void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
- bool MangleReturnType) {
- // We should never be mangling something without a prototype.
- const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
-
- // <bare-function-type> ::= <signature type>+
- if (MangleReturnType)
- mangleType(Proto->getResultType());
-
- if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
- // <builtin-type> ::= v # void
- Out << 'v';
- return;
- }
-
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- mangleType(*Arg);
-
- // <builtin-type> ::= z # ellipsis
- if (Proto->isVariadic())
- Out << 'z';
-}
-
-// <type> ::= <class-enum-type>
-// <class-enum-type> ::= <name>
-void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
- mangleName(T->getDecl());
-}
-
-// <type> ::= <class-enum-type>
-// <class-enum-type> ::= <name>
-void CXXNameMangler::mangleType(const EnumType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void CXXNameMangler::mangleType(const RecordType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void CXXNameMangler::mangleType(const TagType *T) {
- mangleName(T->getDecl());
-}
-
-// <type> ::= <array-type>
-// <array-type> ::= A <positive dimension number> _ <element type>
-// ::= A [<dimension expression>] _ <element type>
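-// Illustrative examples: "int [10]" mangles as A10_i, and an incomplete
-// "int []" as A_i.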
-void CXXNameMangler::mangleType(const ConstantArrayType *T) {
- Out << 'A' << T->getSize() << '_';
- mangleType(T->getElementType());
-}
-void CXXNameMangler::mangleType(const VariableArrayType *T) {
- Out << 'A';
- mangleExpression(T->getSizeExpr());
- Out << '_';
- mangleType(T->getElementType());
-}
-void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
- Out << 'A';
- mangleExpression(T->getSizeExpr());
- Out << '_';
- mangleType(T->getElementType());
-}
-void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
- Out << 'A' << '_';
- mangleType(T->getElementType());
-}
-
-// <type> ::= <pointer-to-member-type>
-// <pointer-to-member-type> ::= M <class type> <member type>
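-// Illustrative examples (C is hypothetical): "int C::*" mangles as M1Ci,
-// and "int (C::*)() const" as M1CKFivE, the member function's
-// cv-qualifiers preceding the F...E function type.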
-void CXXNameMangler::mangleType(const MemberPointerType *T) {
- Out << 'M';
- mangleType(QualType(T->getClass(), 0));
- QualType PointeeType = T->getPointeeType();
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
- mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
- mangleType(FPT);
-
- // Itanium C++ ABI 5.1.8:
- //
- // The type of a non-static member function is considered to be different,
- // for the purposes of substitution, from the type of a namespace-scope or
- // static member function whose type appears similar. The types of two
- // non-static member functions are considered to be different, for the
- // purposes of substitution, if the functions are members of different
- // classes. In other words, for the purposes of substitution, the class of
- // which the function is a member is considered part of the type of
- // function.
-
- // We increment the SeqID here to emulate adding an entry to the
- // substitution table. We can't actually add it because we don't want this
- // particular function type to be substituted.
- ++SeqID;
- } else
- mangleType(PointeeType);
-}
-
-// <type> ::= <template-param>
-void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
- mangleTemplateParameter(T->getIndex());
-}
-
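-// Illustrative examples for the pointer and reference productions below:
-// "const char *" mangles as PKc, "int &" as Ri, and "int &&" as Oi.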
-// <type> ::= P <type> # pointer-to
-void CXXNameMangler::mangleType(const PointerType *T) {
- Out << 'P';
- mangleType(T->getPointeeType());
-}
-void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
- Out << 'P';
- mangleType(T->getPointeeType());
-}
-
-// <type> ::= R <type> # reference-to
-void CXXNameMangler::mangleType(const LValueReferenceType *T) {
- Out << 'R';
- mangleType(T->getPointeeType());
-}
-
-// <type> ::= O <type> # rvalue reference-to (C++0x)
-void CXXNameMangler::mangleType(const RValueReferenceType *T) {
- Out << 'O';
- mangleType(T->getPointeeType());
-}
-
-// <type> ::= C <type> # complex pair (C 2000)
-void CXXNameMangler::mangleType(const ComplexType *T) {
- Out << 'C';
- mangleType(T->getElementType());
-}
-
-// GNU extension: vector types
-// <type> ::= <vector-type>
-// <vector-type> ::= Dv <positive dimension number> _
-// <extended element type>
-// ::= Dv [<dimension expression>] _ <element type>
-// <extended element type> ::= <element type>
-// ::= p # AltiVec vector pixel
-void CXXNameMangler::mangleType(const VectorType *T) {
- Out << "Dv" << T->getNumElements() << '_';
- if (T->getAltiVecSpecific() == VectorType::Pixel)
- Out << 'p';
- else if (T->getAltiVecSpecific() == VectorType::Bool)
- Out << 'b';
- else
- mangleType(T->getElementType());
-}
-void CXXNameMangler::mangleType(const ExtVectorType *T) {
- mangleType(static_cast<const VectorType*>(T));
-}
-void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
- Out << "Dv";
- mangleExpression(T->getSizeExpr());
- Out << '_';
- mangleType(T->getElementType());
-}
-
-void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
- mangleSourceName(T->getDecl()->getIdentifier());
-}
-
-void CXXNameMangler::mangleType(const ObjCObjectType *T) {
- // We don't allow overloading by different protocol qualification,
- // so mangling them isn't necessary.
- mangleType(T->getBaseType());
-}
-
-void CXXNameMangler::mangleType(const BlockPointerType *T) {
- Out << "U13block_pointer";
- mangleType(T->getPointeeType());
-}
-
-void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
- // Mangle injected class name types as if the user had written the
- // specialization out fully. It may not actually be possible to see
- // this mangling, though.
- mangleType(T->getInjectedSpecializationType());
-}
-
-void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
- if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
- mangleName(TD, T->getArgs(), T->getNumArgs());
- } else {
- if (mangleSubstitution(QualType(T, 0)))
- return;
-
- mangleTemplatePrefix(T->getTemplateName());
-
- // FIXME: GCC does not appear to mangle the template arguments when
- // the template in question is a dependent template name. Should we
- // emulate that badness?
- mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
- addSubstitution(QualType(T, 0));
- }
-}
-
-void CXXNameMangler::mangleType(const DependentNameType *T) {
- // Typename types are always nested
- Out << 'N';
- mangleUnresolvedScope(T->getQualifier());
- mangleSourceName(T->getIdentifier());
- Out << 'E';
-}
-
-void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
- // Dependently-scoped template types are always nested
- Out << 'N';
-
- // TODO: avoid making this TemplateName.
- TemplateName Prefix =
- getASTContext().getDependentTemplateName(T->getQualifier(),
- T->getIdentifier());
- mangleTemplatePrefix(Prefix);
-
- // FIXME: GCC does not appear to mangle the template arguments when
- // the template in question is a dependent template name. Should we
- // emulate that badness?
- mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
- Out << 'E';
-}
-
-void CXXNameMangler::mangleType(const TypeOfType *T) {
- // FIXME: this is pretty unsatisfactory, but there isn't an obvious
- // "extension with parameters" mangling.
- Out << "u6typeof";
-}
-
-void CXXNameMangler::mangleType(const TypeOfExprType *T) {
- // FIXME: this is pretty unsatisfactory, but there isn't an obvious
- // "extension with parameters" mangling.
- Out << "u6typeof";
-}
-
-void CXXNameMangler::mangleType(const DecltypeType *T) {
- Expr *E = T->getUnderlyingExpr();
-
- // type ::= Dt <expression> E # decltype of an id-expression
- // # or class member access
- // ::= DT <expression> E # decltype of an expression
-
- // This purports to be an exhaustive list of id-expressions and
- // class member accesses. Note that we do not ignore parentheses;
- // parentheses change the semantics of decltype for these
- // expressions (and cause the mangler to use the other form).
- if (isa<DeclRefExpr>(E) ||
- isa<MemberExpr>(E) ||
- isa<UnresolvedLookupExpr>(E) ||
- isa<DependentScopeDeclRefExpr>(E) ||
- isa<CXXDependentScopeMemberExpr>(E) ||
- isa<UnresolvedMemberExpr>(E))
- Out << "Dt";
- else
- Out << "DT";
- mangleExpression(E);
- Out << 'E';
-}
-
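-// Illustrative examples: the boolean literal true mangles as Lb1E, the int
-// literal 7 as Li7E, and -7 as Lin7E.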
-void CXXNameMangler::mangleIntegerLiteral(QualType T,
- const llvm::APSInt &Value) {
- // <expr-primary> ::= L <type> <value number> E # integer literal
- Out << 'L';
-
- mangleType(T);
- if (T->isBooleanType()) {
- // Boolean values are encoded as 0/1.
- Out << (Value.getBoolValue() ? '1' : '0');
- } else {
- mangleNumber(Value);
- }
- Out << 'E';
-}
-
-/// Mangles a member expression. Implicit accesses are not handled,
-/// but that should be okay, because you shouldn't be able to
-/// make an implicit access in a function template declaration.
-void CXXNameMangler::mangleMemberExpr(const Expr *Base,
- bool IsArrow,
- NestedNameSpecifier *Qualifier,
- DeclarationName Member,
- unsigned Arity) {
- // gcc-4.4 uses 'dt' for dot expressions, which is reasonable.
- // OTOH, gcc also mangles the name as an expression.
- Out << (IsArrow ? "pt" : "dt");
- mangleExpression(Base);
- mangleUnresolvedName(Qualifier, Member, Arity);
-}
-
-void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
- // <expression> ::= <unary operator-name> <expression>
- // ::= <binary operator-name> <expression> <expression>
- // ::= <trinary operator-name> <expression> <expression> <expression>
- // ::= cl <expression>* E # call
- // ::= cv <type> expression # conversion with one argument
- // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
- // ::= st <type> # sizeof (a type)
- // ::= at <type> # alignof (a type)
- // ::= <template-param>
- // ::= <function-param>
- // ::= sr <type> <unqualified-name> # dependent name
- // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
- // ::= sZ <template-param> # size of a parameter pack
- // ::= <expr-primary>
- // <expr-primary> ::= L <type> <value number> E # integer literal
-  //                  ::= L <type> <value float> E  # floating literal
- // ::= L <mangled-name> E # external name
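-  // Illustrative examples: in a dependent signature, "sizeof(int)" mangles
-  // as "sti", and a call "f(x)" as "cl", the mangled callee and arguments,
-  // then a closing E.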
- switch (E->getStmtClass()) {
- case Expr::NoStmtClass:
-#define EXPR(Type, Base)
-#define STMT(Type, Base) \
- case Expr::Type##Class:
-#include "clang/AST/StmtNodes.inc"
- // fallthrough
-
- // These all can only appear in local or variable-initialization
- // contexts and so should never appear in a mangling.
- case Expr::AddrLabelExprClass:
- case Expr::BlockDeclRefExprClass:
- case Expr::CXXThisExprClass:
- case Expr::DesignatedInitExprClass:
- case Expr::ImplicitValueInitExprClass:
- case Expr::InitListExprClass:
- case Expr::ParenListExprClass:
- case Expr::CXXScalarValueInitExprClass:
- llvm_unreachable("unexpected statement kind");
- break;
-
- // FIXME: invent manglings for all these.
- case Expr::BlockExprClass:
- case Expr::CXXPseudoDestructorExprClass:
- case Expr::ChooseExprClass:
- case Expr::CompoundLiteralExprClass:
- case Expr::ExtVectorElementExprClass:
- case Expr::ObjCEncodeExprClass:
- case Expr::ObjCImplicitSetterGetterRefExprClass:
- case Expr::ObjCIsaExprClass:
- case Expr::ObjCIvarRefExprClass:
- case Expr::ObjCMessageExprClass:
- case Expr::ObjCPropertyRefExprClass:
- case Expr::ObjCProtocolExprClass:
- case Expr::ObjCSelectorExprClass:
- case Expr::ObjCStringLiteralClass:
- case Expr::ObjCSuperExprClass:
- case Expr::OffsetOfExprClass:
- case Expr::PredefinedExprClass:
- case Expr::ShuffleVectorExprClass:
- case Expr::StmtExprClass:
- case Expr::TypesCompatibleExprClass:
- case Expr::UnaryTypeTraitExprClass:
- case Expr::VAArgExprClass: {
- // As bad as this diagnostic is, it's better than crashing.
- Diagnostic &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot yet mangle expression type %0");
- Diags.Report(FullSourceLoc(E->getExprLoc(),
- getASTContext().getSourceManager()),
- DiagID)
- << E->getStmtClassName() << E->getSourceRange();
- break;
- }
-
- case Expr::CXXDefaultArgExprClass:
- mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
- break;
-
- case Expr::CXXMemberCallExprClass: // fallthrough
- case Expr::CallExprClass: {
- const CallExpr *CE = cast<CallExpr>(E);
- Out << "cl";
- mangleExpression(CE->getCallee(), CE->getNumArgs());
- for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
- mangleExpression(CE->getArg(I));
- Out << 'E';
- break;
- }
-
- case Expr::CXXNewExprClass: {
- // Proposal from David Vandervoorde, 2010.06.30
- const CXXNewExpr *New = cast<CXXNewExpr>(E);
- if (New->isGlobalNew()) Out << "gs";
- Out << (New->isArray() ? "na" : "nw");
- for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
- E = New->placement_arg_end(); I != E; ++I)
- mangleExpression(*I);
- Out << '_';
- mangleType(New->getAllocatedType());
- if (New->hasInitializer()) {
- Out << "pi";
- for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
- E = New->constructor_arg_end(); I != E; ++I)
- mangleExpression(*I);
- }
- Out << 'E';
- break;
- }
-
- case Expr::MemberExprClass: {
- const MemberExpr *ME = cast<MemberExpr>(E);
- mangleMemberExpr(ME->getBase(), ME->isArrow(),
- ME->getQualifier(), ME->getMemberDecl()->getDeclName(),
- Arity);
- break;
- }
-
- case Expr::UnresolvedMemberExprClass: {
- const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
- mangleMemberExpr(ME->getBase(), ME->isArrow(),
- ME->getQualifier(), ME->getMemberName(),
- Arity);
- if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getExplicitTemplateArgs());
- break;
- }
-
- case Expr::CXXDependentScopeMemberExprClass: {
- const CXXDependentScopeMemberExpr *ME
- = cast<CXXDependentScopeMemberExpr>(E);
- mangleMemberExpr(ME->getBase(), ME->isArrow(),
- ME->getQualifier(), ME->getMember(),
- Arity);
- if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getExplicitTemplateArgs());
- break;
- }
-
- case Expr::UnresolvedLookupExprClass: {
- // The ABI doesn't cover how to mangle overload sets, so we mangle
- // using something as close as possible to the original lookup
- // expression.
- const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
- mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);
- if (ULE->hasExplicitTemplateArgs())
- mangleTemplateArgs(ULE->getExplicitTemplateArgs());
- break;
- }
-
- case Expr::CXXUnresolvedConstructExprClass: {
- const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
- unsigned N = CE->arg_size();
-
- Out << "cv";
- mangleType(CE->getType());
- if (N != 1) Out << '_';
- for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
- if (N != 1) Out << 'E';
- break;
- }
-
- case Expr::CXXTemporaryObjectExprClass:
- case Expr::CXXConstructExprClass: {
- const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
- unsigned N = CE->getNumArgs();
-
- Out << "cv";
- mangleType(CE->getType());
- if (N != 1) Out << '_';
- for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
- if (N != 1) Out << 'E';
- break;
- }
-
- case Expr::SizeOfAlignOfExprClass: {
- const SizeOfAlignOfExpr *SAE = cast<SizeOfAlignOfExpr>(E);
- if (SAE->isSizeOf()) Out << 's';
- else Out << 'a';
- if (SAE->isArgumentType()) {
- Out << 't';
- mangleType(SAE->getArgumentType());
- } else {
- Out << 'z';
- mangleExpression(SAE->getArgumentExpr());
- }
- break;
- }
-
- case Expr::CXXThrowExprClass: {
- const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
-
- // Proposal from David Vandervoorde, 2010.06.30
- if (TE->getSubExpr()) {
- Out << "tw";
- mangleExpression(TE->getSubExpr());
- } else {
- Out << "tr";
- }
- break;
- }
-
- case Expr::CXXTypeidExprClass: {
- const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
-
- // Proposal from David Vandervoorde, 2010.06.30
- if (TIE->isTypeOperand()) {
- Out << "ti";
- mangleType(TIE->getTypeOperand());
- } else {
- Out << "te";
- mangleExpression(TIE->getExprOperand());
- }
- break;
- }
-
- case Expr::CXXDeleteExprClass: {
- const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
-
- // Proposal from David Vandervoorde, 2010.06.30
- if (DE->isGlobalDelete()) Out << "gs";
- Out << (DE->isArrayForm() ? "da" : "dl");
- mangleExpression(DE->getArgument());
- break;
- }
-
- case Expr::UnaryOperatorClass: {
- const UnaryOperator *UO = cast<UnaryOperator>(E);
- mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
- /*Arity=*/1);
- mangleExpression(UO->getSubExpr());
- break;
- }
-
- case Expr::ArraySubscriptExprClass: {
- const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
-
-    // Array subscript is treated as a syntactically weird form of
-    // binary operator.
- Out << "ix";
- mangleExpression(AE->getLHS());
- mangleExpression(AE->getRHS());
- break;
- }
-
- case Expr::CompoundAssignOperatorClass: // fallthrough
- case Expr::BinaryOperatorClass: {
- const BinaryOperator *BO = cast<BinaryOperator>(E);
- mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
- /*Arity=*/2);
- mangleExpression(BO->getLHS());
- mangleExpression(BO->getRHS());
- break;
- }
-
- case Expr::ConditionalOperatorClass: {
- const ConditionalOperator *CO = cast<ConditionalOperator>(E);
- mangleOperatorName(OO_Conditional, /*Arity=*/3);
- mangleExpression(CO->getCond());
- mangleExpression(CO->getLHS(), Arity);
- mangleExpression(CO->getRHS(), Arity);
- break;
- }
-
- case Expr::ImplicitCastExprClass: {
- mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr(), Arity);
- break;
- }
-
- case Expr::CStyleCastExprClass:
- case Expr::CXXStaticCastExprClass:
- case Expr::CXXDynamicCastExprClass:
- case Expr::CXXReinterpretCastExprClass:
- case Expr::CXXConstCastExprClass:
- case Expr::CXXFunctionalCastExprClass: {
- const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
- Out << "cv";
- mangleType(ECE->getType());
- mangleExpression(ECE->getSubExpr());
- break;
- }
-
- case Expr::CXXOperatorCallExprClass: {
- const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
- unsigned NumArgs = CE->getNumArgs();
- mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
- // Mangle the arguments.
- for (unsigned i = 0; i != NumArgs; ++i)
- mangleExpression(CE->getArg(i));
- break;
- }
-
- case Expr::ParenExprClass:
- mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
- break;
-
- case Expr::DeclRefExprClass: {
- const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
-
- switch (D->getKind()) {
- default:
- // <expr-primary> ::= L <mangled-name> E # external name
- Out << 'L';
- mangle(D, "_Z");
- Out << 'E';
- break;
-
- case Decl::EnumConstant: {
- const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
- mangleIntegerLiteral(ED->getType(), ED->getInitVal());
- break;
- }
-
- case Decl::NonTypeTemplateParm: {
- const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
- mangleTemplateParameter(PD->getIndex());
- break;
- }
-
- }
-
- break;
- }
-
- case Expr::DependentScopeDeclRefExprClass: {
- const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
- NestedNameSpecifier *NNS = DRE->getQualifier();
- const Type *QTy = NNS->getAsType();
-
- // When we're dealing with a nested-name-specifier that has just a
- // dependent identifier in it, mangle that as a typename. FIXME:
- // It isn't clear that we ever actually want to have such a
- // nested-name-specifier; why not just represent it as a typename type?
- if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
- QTy = getASTContext().getDependentNameType(ETK_Typename,
- NNS->getPrefix(),
- NNS->getAsIdentifier())
- .getTypePtr();
- }
- assert(QTy && "Qualifier was not type!");
-
- // ::= sr <type> <unqualified-name> # dependent name
- // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
- Out << "sr";
- mangleType(QualType(QTy, 0));
- mangleUnqualifiedName(0, DRE->getDeclName(), Arity);
- if (DRE->hasExplicitTemplateArgs())
- mangleTemplateArgs(DRE->getExplicitTemplateArgs());
-
- break;
- }
-
- case Expr::CXXBindTemporaryExprClass:
- mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
- break;
-
- case Expr::CXXExprWithTemporariesClass:
- mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr(), Arity);
- break;
-
- case Expr::FloatingLiteralClass: {
- const FloatingLiteral *FL = cast<FloatingLiteral>(E);
- Out << 'L';
- mangleType(FL->getType());
- mangleFloat(FL->getValue());
- Out << 'E';
- break;
- }
-
- case Expr::CharacterLiteralClass:
- Out << 'L';
- mangleType(E->getType());
- Out << cast<CharacterLiteral>(E)->getValue();
- Out << 'E';
- break;
-
- case Expr::CXXBoolLiteralExprClass:
- Out << "Lb";
- Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
- Out << 'E';
- break;
-
- case Expr::IntegerLiteralClass: {
- llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
- if (E->getType()->isSignedIntegerType())
- Value.setIsSigned(true);
- mangleIntegerLiteral(E->getType(), Value);
- break;
- }
-
- case Expr::ImaginaryLiteralClass: {
- const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
- // Mangle as if a complex literal.
- // Proposal from David Vandervoorde, 2010.06.30.
- Out << 'L';
- mangleType(E->getType());
- if (const FloatingLiteral *Imag =
- dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
- // Mangle a floating-point zero of the appropriate type.
- mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
- Out << '_';
- mangleFloat(Imag->getValue());
- } else {
- Out << '0' << '_';
- llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
- if (IE->getSubExpr()->getType()->isSignedIntegerType())
- Value.setIsSigned(true);
- mangleNumber(Value);
- }
- Out << 'E';
- break;
- }
-
- case Expr::StringLiteralClass: {
- // Revised proposal from David Vandervoorde, 2010.07.15.
- Out << 'L';
- assert(isa<ConstantArrayType>(E->getType()));
- mangleType(E->getType());
- Out << 'E';
- break;
- }
-
- case Expr::GNUNullExprClass:
- // FIXME: should this really be mangled the same as nullptr?
- // fallthrough
-
- case Expr::CXXNullPtrLiteralExprClass: {
- // Proposal from David Vandervoorde, 2010.06.30, as
- // modified by ABI list discussion.
- Out << "LDnE";
- break;
- }
-
- }
-}
-
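-// Illustrative examples (Foo is hypothetical): Foo's complete-object
-// constructor mangles as _ZN3FooC1Ev and its deleting destructor as
-// _ZN3FooD0Ev.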
-void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
- // <ctor-dtor-name> ::= C1 # complete object constructor
- // ::= C2 # base object constructor
- // ::= C3 # complete object allocating constructor
- //
- switch (T) {
- case Ctor_Complete:
- Out << "C1";
- break;
- case Ctor_Base:
- Out << "C2";
- break;
- case Ctor_CompleteAllocating:
- Out << "C3";
- break;
- }
-}
-
-void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
- // <ctor-dtor-name> ::= D0 # deleting destructor
- // ::= D1 # complete object destructor
- // ::= D2 # base object destructor
- //
- switch (T) {
- case Dtor_Deleting:
- Out << "D0";
- break;
- case Dtor_Complete:
- Out << "D1";
- break;
- case Dtor_Base:
- Out << "D2";
- break;
- }
-}
-
-void CXXNameMangler::mangleTemplateArgs(
- const ExplicitTemplateArgumentList &TemplateArgs) {
- // <template-args> ::= I <template-arg>+ E
- Out << 'I';
- for (unsigned I = 0, E = TemplateArgs.NumTemplateArgs; I != E; ++I)
- mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[I].getArgument());
- Out << 'E';
-}
-
-void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
- NumTemplateArgs);
-
- // <template-args> ::= I <template-arg>+ E
- Out << 'I';
- for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(0, TemplateArgs[i]);
- Out << 'E';
-}
-
-void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgumentList &AL) {
- // <template-args> ::= I <template-arg>+ E
- Out << 'I';
- for (unsigned i = 0, e = AL.size(); i != e; ++i)
- mangleTemplateArg(PL.getParam(i), AL[i]);
- Out << 'E';
-}
-
-void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- // <template-args> ::= I <template-arg>+ E
- Out << 'I';
- for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
- Out << 'E';
-}
-
-void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
- const TemplateArgument &A) {
- // <template-arg> ::= <type> # type or template
- // ::= X <expression> E # expression
- // ::= <expr-primary> # simple expressions
- // ::= I <template-arg>* E # argument pack
-  //                ::= sp <expression>           # pack expansion (C++0x)
- switch (A.getKind()) {
- default:
- assert(0 && "Unknown template argument kind!");
- case TemplateArgument::Type:
- mangleType(A.getAsType());
- break;
- case TemplateArgument::Template:
- // This is mangled as <type>.
- mangleType(A.getAsTemplate());
- break;
- case TemplateArgument::Expression:
- Out << 'X';
- mangleExpression(A.getAsExpr());
- Out << 'E';
- break;
- case TemplateArgument::Integral:
- mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
- break;
- case TemplateArgument::Declaration: {
- assert(P && "Missing template parameter for declaration argument");
- // <expr-primary> ::= L <mangled-name> E # external name
-
-    // Clang produces ASTs where pointer-to-member-function and
-    // pointer-to-function expressions are represented as a declaration rather
-    // than an expression. We compensate for that here to produce the correct
-    // mangling.
- NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
- const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
- bool compensateMangling = D->isCXXClassMember() &&
- !Parameter->getType()->isReferenceType();
- if (compensateMangling) {
- Out << 'X';
- mangleOperatorName(OO_Amp, 1);
- }
-
- Out << 'L';
-    // References to external entities use the mangled name; if the name would
-    // not normally be mangled then mangle it as unqualified.
- //
- // FIXME: The ABI specifies that external names here should have _Z, but
- // gcc leaves this off.
- if (compensateMangling)
- mangle(D, "_Z");
- else
- mangle(D, "Z");
- Out << 'E';
-
- if (compensateMangling)
- Out << 'E';
-
- break;
- }
- }
-}
-
-void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
- // <template-param> ::= T_ # first template parameter
- // ::= T <parameter-2 non-negative number> _
- if (Index == 0)
- Out << "T_";
- else
- Out << 'T' << (Index - 1) << '_';
-}
-
-// <substitution> ::= S <seq-id> _
-// ::= S_
-bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
- // Try one of the standard substitutions first.
- if (mangleStandardSubstitution(ND))
- return true;
-
- ND = cast<NamedDecl>(ND->getCanonicalDecl());
- return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
-}
-
-bool CXXNameMangler::mangleSubstitution(QualType T) {
- if (!T.getCVRQualifiers()) {
- if (const RecordType *RT = T->getAs<RecordType>())
- return mangleSubstitution(RT->getDecl());
- }
-
- uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
-
- return mangleSubstitution(TypePtr);
-}
-
-bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return mangleSubstitution(TD);
-
- Template = Context.getASTContext().getCanonicalTemplateName(Template);
- return mangleSubstitution(
- reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
-}
-
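-// Illustrative sequence: the first substitution candidate is emitted as S_,
-// later ones as S0_, S1_, ..., S9_, SA_, ..., SZ_, S10_, since <seq-id> is
-// written in base 36.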
-bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
- llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
- if (I == Substitutions.end())
- return false;
-
- unsigned SeqID = I->second;
- if (SeqID == 0)
- Out << "S_";
- else {
- SeqID--;
-
- // <seq-id> is encoded in base-36, using digits and upper case letters.
- char Buffer[10];
- char *BufferPtr = llvm::array_endof(Buffer);
-
- if (SeqID == 0) *--BufferPtr = '0';
-
- while (SeqID) {
- assert(BufferPtr > Buffer && "Buffer overflow!");
-
- char c = static_cast<char>(SeqID % 36);
-
- *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
- SeqID /= 36;
- }
-
- Out << 'S'
- << llvm::StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
- << '_';
- }
-
- return true;
-}
-
-static bool isCharType(QualType T) {
- if (T.isNull())
- return false;
-
- return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
- T->isSpecificBuiltinType(BuiltinType::Char_U);
-}
-
-/// isCharSpecialization - Returns whether a given type is a template
-/// specialization of a given name with a single argument of type char.
-static bool isCharSpecialization(QualType T, const char *Name) {
- if (T.isNull())
- return false;
-
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
-
- const ClassTemplateSpecializationDecl *SD =
- dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
- if (!SD)
- return false;
-
- if (!isStdNamespace(SD->getDeclContext()))
- return false;
-
- const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
- if (TemplateArgs.size() != 1)
- return false;
-
- if (!isCharType(TemplateArgs[0].getAsType()))
- return false;
-
- return SD->getIdentifier()->getName() == Name;
-}
-
-template <std::size_t StrLen>
-bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
- const char (&Str)[StrLen]) {
- if (!SD->getIdentifier()->isStr(Str))
- return false;
-
- const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
- if (TemplateArgs.size() != 2)
- return false;
-
- if (!isCharType(TemplateArgs[0].getAsType()))
- return false;
-
- if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
- return false;
-
- return true;
-}
-
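-// Illustrative example: with these abbreviations, "void f(std::string)"
-// mangles as _Z1fSs rather than spelling out the full basic_string
-// specialization.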
-bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
- // <substitution> ::= St # ::std::
- if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
- if (isStd(NS)) {
- Out << "St";
- return true;
- }
- }
-
- if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
- if (!isStdNamespace(TD->getDeclContext()))
- return false;
-
- // <substitution> ::= Sa # ::std::allocator
- if (TD->getIdentifier()->isStr("allocator")) {
- Out << "Sa";
- return true;
- }
-
-    // <substitution> ::= Sb # ::std::basic_string
- if (TD->getIdentifier()->isStr("basic_string")) {
- Out << "Sb";
- return true;
- }
- }
-
- if (const ClassTemplateSpecializationDecl *SD =
- dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
- if (!isStdNamespace(SD->getDeclContext()))
- return false;
-
- // <substitution> ::= Ss # ::std::basic_string<char,
- // ::std::char_traits<char>,
- // ::std::allocator<char> >
- if (SD->getIdentifier()->isStr("basic_string")) {
- const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
-
- if (TemplateArgs.size() != 3)
- return false;
-
- if (!isCharType(TemplateArgs[0].getAsType()))
- return false;
-
- if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
- return false;
-
- if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
- return false;
-
- Out << "Ss";
- return true;
- }
-
- // <substitution> ::= Si # ::std::basic_istream<char,
- // ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_istream")) {
- Out << "Si";
- return true;
- }
-
- // <substitution> ::= So # ::std::basic_ostream<char,
- // ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_ostream")) {
- Out << "So";
- return true;
- }
-
- // <substitution> ::= Sd # ::std::basic_iostream<char,
- // ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_iostream")) {
- Out << "Sd";
- return true;
- }
- }
- return false;
-}
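For reference, a few declarations whose manglings use these abbreviations directly (assuming the usual Itanium encoding of the rest of the name):

    #include <istream>
    #include <memory>
    #include <string>

    void foo(std::string);          // _Z3fooSs    (Ss)
    void bar(std::istream &);       // _Z3barRSi   (Si, behind 'R' for the reference)
    void baz(std::allocator<int>);  // _Z3bazSaIiE (Sa, followed by the template args)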
-
-void CXXNameMangler::addSubstitution(QualType T) {
- if (!T.getCVRQualifiers()) {
- if (const RecordType *RT = T->getAs<RecordType>()) {
- addSubstitution(RT->getDecl());
- return;
- }
- }
-
- uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
- addSubstitution(TypePtr);
-}
-
-void CXXNameMangler::addSubstitution(TemplateName Template) {
- if (TemplateDecl *TD = Template.getAsTemplateDecl())
- return addSubstitution(TD);
-
- Template = Context.getASTContext().getCanonicalTemplateName(Template);
- addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
-}
-
-void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
- assert(!Substitutions.count(Ptr) && "Substitution already exists!");
- Substitutions[Ptr] = SeqID++;
-}
-
-//
-
-/// \brief Mangles the name of the declaration D and emits that name into the
-/// given buffer.
-///
-/// Callers are expected to check shouldMangleDeclName() first; when that
-/// routine returns false, the identifier of the declaration
-/// (\c D->getIdentifier()) should be emitted directly instead of calling
-/// this function.
-void MangleContext::mangleName(const NamedDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
-
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- getASTContext().getSourceManager(),
- "Mangling declaration");
-
- CXXNameMangler Mangler(*this, Res);
- return Mangler.mangle(D);
-}
-
-void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &Res) {
- CXXNameMangler Mangler(*this, Res, D, Type);
- Mangler.mangle(D);
-}
-
-void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &Res) {
- CXXNameMangler Mangler(*this, Res, D, Type);
- Mangler.mangle(D);
-}
-
-void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD,
- llvm::SmallVectorImpl<char> &Res) {
- MiscNameMangler Mangler(*this, Res);
- Mangler.mangleBlock(GD, BD);
-}
-
-void MangleContext::mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= T <call-offset> <base encoding>
- // # base is the nominal target function of thunk
- // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
- // # base is the nominal target function of thunk
- // # first call-offset is 'this' adjustment
- // # second call-offset is result adjustment
-
- assert(!isa<CXXDestructorDecl>(MD) &&
- "Use mangleCXXDtor for destructor decls!");
-
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZT";
- if (!Thunk.Return.isEmpty())
- Mangler.getStream() << 'c';
-
- // Mangle the 'this' pointer adjustment.
- Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
-
- // Mangle the return pointer adjustment if there is one.
- if (!Thunk.Return.isEmpty())
- Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
- Thunk.Return.VBaseOffsetOffset);
-
- Mangler.mangleFunctionEncoding(MD);
-}
-
-void
-MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= T <call-offset> <base encoding>
- // # base is the nominal target function of thunk
-
- CXXNameMangler Mangler(*this, Res, DD, Type);
- Mangler.getStream() << "_ZT";
-
- // Mangle the 'this' pointer adjustment.
- Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
- ThisAdjustment.VCallOffsetOffset);
-
- Mangler.mangleFunctionEncoding(DD);
-}
-
-/// mangleGuardVariable - Returns the mangled name for a guard variable
-/// for the passed in VarDecl.
-void MangleContext::mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= GV <object name> # Guard variable for one-time
- // # initialization
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZGV";
- Mangler.mangleName(D);
-}
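For example, the guard for a function-local static whose initializer must run at most once (a sketch; the local name is encoded as Z <function encoding> E <name>):

    int seed();
    int next() {
      static int counter = seed(); // guard variable: _ZGVZ4nextvE7counter
      return ++counter;
    }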
-
-void MangleContext::mangleReferenceTemporary(const VarDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
- // We match the GCC mangling here.
- // <special-name> ::= GR <object name>
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZGR";
- Mangler.mangleName(D);
-}
-
-void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= TV <type> # virtual table
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTV";
- Mangler.mangleNameOrStandardSubstitution(RD);
-}
-
-void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= TT <type> # VTT structure
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTT";
- Mangler.mangleNameOrStandardSubstitution(RD);
-}
-
-void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= TC <type> <offset number> _ <base type>
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTC";
- Mangler.mangleNameOrStandardSubstitution(RD);
- Mangler.getStream() << Offset;
- Mangler.getStream() << '_';
- Mangler.mangleNameOrStandardSubstitution(Type);
-}
-
-void MangleContext::mangleCXXRTTI(QualType Ty,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= TI <type> # typeinfo structure
- assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTI";
- Mangler.mangleType(Ty);
-}
-
-void MangleContext::mangleCXXRTTIName(QualType Ty,
- llvm::SmallVectorImpl<char> &Res) {
- // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTS";
- Mangler.mangleType(Ty);
-}
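Taken together, the special names emitted above look like this for a simple polymorphic class:

    struct Foo { virtual ~Foo(); };
    // vtable:             _ZTV3Foo
    // VTT:                _ZTT3Foo
    // typeinfo structure: _ZTI3Foo
    // typeinfo name:      _ZTS3Foo  (the string "3Foo")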
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
deleted file mode 100644
index 139f6c0377ec..000000000000
--- a/lib/CodeGen/Mangle.h
+++ /dev/null
@@ -1,177 +0,0 @@
-//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Implements C++ name mangling according to the Itanium C++ ABI,
-// which is used in GCC 3.2 and newer (and many compilers that are
-// ABI-compatible with GCC):
-//
-// http://www.codesourcery.com/public/cxx-abi/abi.html
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
-#define LLVM_CLANG_CODEGEN_MANGLE_H
-
-#include "CGCXX.h"
-#include "GlobalDecl.h"
-#include "clang/AST/Type.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace clang {
- class ASTContext;
- class BlockDecl;
- class CXXConstructorDecl;
- class CXXDestructorDecl;
- class CXXMethodDecl;
- class FunctionDecl;
- class NamedDecl;
- class ObjCMethodDecl;
- class VarDecl;
-
-namespace CodeGen {
- struct ThisAdjustment;
- struct ThunkInfo;
-
-/// MangleBuffer - a convenient class for storing a name which is
-/// either the result of a mangling or is a constant string with
-/// external memory ownership.
-class MangleBuffer {
-public:
- void setString(llvm::StringRef Ref) {
- String = Ref;
- }
-
- llvm::SmallVectorImpl<char> &getBuffer() {
- return Buffer;
- }
-
- llvm::StringRef getString() const {
- if (!String.empty()) return String;
- return Buffer.str();
- }
-
- operator llvm::StringRef() const {
- return getString();
- }
-
-private:
- llvm::StringRef String;
- llvm::SmallString<256> Buffer;
-};
-
-/// MangleContext - Context for tracking state which persists across multiple
-/// calls to the C++ name mangler.
-class MangleContext {
- ASTContext &Context;
- Diagnostic &Diags;
-
- llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
- unsigned Discriminator;
- llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
- llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
- llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
-
-public:
- explicit MangleContext(ASTContext &Context,
- Diagnostic &Diags)
- : Context(Context), Diags(Diags) { }
-
- virtual ~MangleContext() { }
-
- ASTContext &getASTContext() const { return Context; }
-
- Diagnostic &getDiags() const { return Diags; }
-
- void startNewFunction() { LocalBlockIds.clear(); }
-
- uint64_t getAnonymousStructId(const TagDecl *TD) {
- std::pair<llvm::DenseMap<const TagDecl *,
- uint64_t>::iterator, bool> Result =
- AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
- return Result.first->second;
- }
-
- unsigned getBlockId(const BlockDecl *BD, bool Local) {
- llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
- = Local? LocalBlockIds : GlobalBlockIds;
- std::pair<llvm::DenseMap<const BlockDecl *, unsigned>::iterator, bool>
- Result = BlockIds.insert(std::make_pair(BD, BlockIds.size()));
- return Result.first->second;
- }
-
- /// @name Mangler Entry Points
- /// @{
-
- virtual bool shouldMangleDeclName(const NamedDecl *D);
- virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
- virtual void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleReferenceTemporary(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &);
- void mangleBlock(GlobalDecl GD,
- const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
-
- void mangleInitDiscriminator() {
- Discriminator = 0;
- }
-
- bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
- unsigned &discriminator = Uniquifier[ND];
- if (!discriminator)
- discriminator = ++Discriminator;
- if (discriminator == 1)
- return false;
- disc = discriminator-2;
- return true;
- }
- /// @}
-};
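The discriminator protocol in getNextDiscriminator() is easiest to see on a function with two same-named locals (a sketch of the intended Itanium behavior; seed() is a stand-in for a non-constant initializer, and the counter is assumed to be reset per name via mangleInitDiscriminator()):

    int seed();
    void f() {
      { static int x = seed(); } // first 'x': getNextDiscriminator() returns
                                 // false, so the local name is plain Z1fvE1x
      { static int x = seed(); } // second 'x': disc == 0, giving Z1fvE1x_0
    }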
-
-/// MiscNameMangler - Mangles Objective-C method names and blocks.
-class MiscNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
-
- ASTContext &getASTContext() const { return Context.getASTContext(); }
-
-public:
- MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res);
-
- llvm::raw_svector_ostream &getStream() { return Out; }
-
- void mangleBlock(GlobalDecl GD, const BlockDecl *BD);
- void mangleObjCMethodName(const ObjCMethodDecl *MD);
-};
-
-}
-}
-
-#endif
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 9407335e3283..3a63eba39741 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -16,107 +16,17 @@
#include "CGCXXABI.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclTemplate.h"
-#include "clang/AST/ExprCXX.h"
-#include "CGVTables.h"
using namespace clang;
using namespace CodeGen;
namespace {
-/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftCXXNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
-
- ASTContext &getASTContext() const { return Context.getASTContext(); }
-
-public:
- MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res) { }
-
- void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
- void mangleName(const NamedDecl *ND);
- void mangleFunctionEncoding(const FunctionDecl *FD);
- void mangleVariableEncoding(const VarDecl *VD);
- void mangleNumber(int64_t Number);
- void mangleType(QualType T);
-
-private:
- void mangleUnqualifiedName(const NamedDecl *ND) {
- mangleUnqualifiedName(ND, ND->getDeclName());
- }
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
- void mangleSourceName(const IdentifierInfo *II);
- void manglePostfix(const DeclContext *DC, bool NoFunction=false);
- void mangleOperatorName(OverloadedOperatorKind OO);
- void mangleQualifiers(Qualifiers Quals, bool IsMember);
-
- void mangleObjCMethodName(const ObjCMethodDecl *MD);
-
- // Declare manglers for every type class.
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
-#include "clang/AST/TypeNodes.def"
-
- void mangleType(const TagType*);
- void mangleType(const FunctionType *T, const FunctionDecl *D,
- bool IsStructor, bool IsInstMethod);
- void mangleType(const ArrayType *T, bool IsGlobal);
- void mangleExtraDimensions(QualType T);
- void mangleFunctionClass(const FunctionDecl *FD);
- void mangleCallingConvention(const FunctionType *T);
- void mangleThrowSpecification(const FunctionProtoType *T);
-
-};
-
-/// MicrosoftMangleContext - Overrides the default MangleContext for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftMangleContext : public MangleContext {
-public:
- MicrosoftMangleContext(ASTContext &Context,
- Diagnostic &Diags) : MangleContext(Context, Diags) { }
- virtual bool shouldMangleDeclName(const NamedDecl *D);
- virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
- virtual void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &);
-};
-
class MicrosoftCXXABI : public CGCXXABI {
- MicrosoftMangleContext MangleCtx;
public:
- MicrosoftCXXABI(CodeGenModule &CGM)
- : CGCXXABI(CGM), MangleCtx(CGM.getContext(), CGM.getDiags()) {}
-
- MicrosoftMangleContext &getMangleContext() {
- return MangleCtx;
- }
+ MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
@@ -145,1071 +55,31 @@ public:
EmitThisParam(CGF);
// TODO: 'for base' flag
}
-};
-
-}
-
-static bool isInCLinkageSpecification(const Decl *D) {
- D = D->getCanonicalDecl();
- for (const DeclContext *DC = D->getDeclContext();
- !DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
- return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
- }
-
- return false;
-}
-
-bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
- // In C, functions with no attributes never need to be mangled. Fastpath them.
- if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
- return false;
-
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (D->hasAttr<AsmLabelAttr>())
- return true;
-
- // Clang's "overloadable" attribute extension to C/C++ implies name mangling
- // (always) as does passing a C++ member function and a function
- // whose name is not a simple identifier.
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
- !FD->getDeclName().isIdentifier()))
- return true;
-
- // Otherwise, no mangling is done outside C++ mode.
- if (!getASTContext().getLangOptions().CPlusPlus)
- return false;
-
- // Variables at global scope with internal linkage are not mangled.
- if (!FD) {
- const DeclContext *DC = D->getDeclContext();
- if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
- return false;
- }
-
- // C functions and "main" are not mangled.
- if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
- return false;
-
- return true;
-}
-
-void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
- llvm::StringRef Prefix) {
- // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
- // Therefore it's really important that we don't decorate the name with
- // leading underscores or leading/trailing at signs. So, emit an asm marker
- // at the start so we get the name right.
- Out << '\01'; // LLVM IR Marker for __asm("foo")
-
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
- // If we have an asm name, then we use it as the mangling.
- Out << ALA->getLabel();
- return;
- }
-
- // <mangled-name> ::= ? <name> <type-encoding>
- Out << Prefix;
- mangleName(D);
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- mangleVariableEncoding(VD);
- // TODO: Fields? Can MSVC even mangle them?
-}
-
-void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
- // <type-encoding> ::= <function-class> <function-type>
-
- // Don't mangle in the type if this isn't a decl we should typically mangle.
- if (!Context.shouldMangleDeclName(FD))
- return;
-
- // We should never ever see a FunctionNoProtoType at this point.
- // We don't even know how to mangle their types anyway :).
- const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
-
- bool InStructor = false, InInstMethod = false;
- const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD) {
- if (MD->isInstance())
- InInstMethod = true;
- if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
- InStructor = true;
- }
-
- // First, the function class.
- mangleFunctionClass(FD);
-
- mangleType(FT, FD, InStructor, InInstMethod);
-}
-
-void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
- // <type-encoding> ::= <storage-class> <variable-type>
- // <storage-class> ::= 0 # private static member
- // ::= 1 # protected static member
- // ::= 2 # public static member
- // ::= 3 # global
- // ::= 4 # static local
-
- // The first character in the encoding (after the name) is the storage class.
- if (VD->isStaticDataMember()) {
- // If it's a static member, it also encodes the access level.
- switch (VD->getAccess()) {
- default:
- case AS_private: Out << '0'; break;
- case AS_protected: Out << '1'; break;
- case AS_public: Out << '2'; break;
- }
- }
- else if (!VD->isStaticLocal())
- Out << '3';
- else
- Out << '4';
- // Now mangle the type.
- // <variable-type> ::= <type> <cvr-qualifiers>
- // ::= <type> A # pointers, references, arrays
- // Pointers and references are odd. The type of 'int * const foo;' gets
- // mangled as 'QAHA' instead of 'PAHB', for example.
- QualType Ty = VD->getType();
- if (Ty->isPointerType() || Ty->isReferenceType()) {
- mangleType(Ty);
- Out << 'A';
- } else if (Ty->isArrayType()) {
- // Global arrays are funny, too.
- mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true);
- Out << 'A';
- } else {
- mangleType(Ty.getLocalUnqualifiedType());
- mangleQualifiers(Ty.getLocalQualifiers(), false);
- }
-}
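Putting <storage-class> and <variable-type> together, a few globals and the manglings the grammar above yields (a sketch):

    int x;            // ?x@@3HA    3 = global, H = int, A = no qualifiers
    const int y = 0;  // ?y@@3HB    B = const
    int *p;           // ?p@@3PAHA  P A H for the pointer, plus the trailing
                      //            'A' required by the pointer rule above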
-
-void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
- // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
- const DeclContext *DC = ND->getDeclContext();
-
- // Always start with the unqualified name.
- mangleUnqualifiedName(ND);
-
- // If this is an extern variable declared locally, the relevant DeclContext
- // is that of the containing namespace, or the translation unit.
- if (isa<FunctionDecl>(DC) && ND->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
-
- manglePostfix(DC);
-
- // Terminate the whole name with an '@'.
- Out << '@';
-}
-
-void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
- // <number> ::= [?] <decimal digit> # <= 9
- // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
- if (Number < 0) {
- Out << '?';
- Number = -Number;
- }
- if (Number >= 1 && Number <= 10) {
- Out << Number-1;
- } else {
- // We have to build up the encoding in reverse order, so it will come
- // out right when we write it out.
- char Encoding[16];
- char *EndPtr = Encoding+sizeof(Encoding);
- char *CurPtr = EndPtr;
- while (Number) {
- *--CurPtr = 'A' + (Number % 16);
- Number /= 16;
- }
- Out.write(CurPtr, EndPtr-CurPtr);
- Out << '@';
- }
-}
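Worked examples of this <number> encoding, derived directly from the code above:

    //    1 -> "0"     values 1..10 map to the single digits '0'..'9'
    //   10 -> "9"
    //   11 -> "L@"    one hex digit, 0xB, written as 'A' + 11
    //   20 -> "BE@"   0x14: digits 1 and 4 become 'B' and 'E'
    //   -4 -> "?3"    a leading '?' marks negative values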
-
-void
-MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
- DeclarationName Name) {
- // <unqualified-name> ::= <operator-name>
- // ::= <ctor-dtor-name>
- // ::= <source-name>
- switch (Name.getNameKind()) {
- case DeclarationName::Identifier: {
- if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
- mangleSourceName(II);
- break;
- }
-
- // Otherwise, an anonymous entity. We must have a declaration.
- assert(ND && "mangling empty name without declaration");
-
- if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
- if (NS->isAnonymousNamespace()) {
- Out << "?A";
- break;
- }
- }
-
- // We must have an anonymous struct.
- const TagDecl *TD = cast<TagDecl>(ND);
- if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
- assert(TD->getDeclContext() == D->getDeclContext() &&
- "Typedef should not be in another decl context!");
- assert(D->getDeclName().getAsIdentifierInfo() &&
- "Typedef was not named!");
- mangleSourceName(D->getDeclName().getAsIdentifierInfo());
- break;
- }
-
- // When VC encounters an anonymous type with no tag and no typedef,
- // it literally emits '<unnamed-tag>'.
- Out << "<unnamed-tag>";
- break;
- }
-
- case DeclarationName::ObjCZeroArgSelector:
- case DeclarationName::ObjCOneArgSelector:
- case DeclarationName::ObjCMultiArgSelector:
- assert(false && "Can't mangle Objective-C selector names here!");
- break;
-
- case DeclarationName::CXXConstructorName:
- assert(false && "Can't mangle constructors yet!");
- break;
-
- case DeclarationName::CXXDestructorName:
- assert(false && "Can't mangle destructors yet!");
- break;
-
- case DeclarationName::CXXConversionFunctionName:
- // <operator-name> ::= ?B # (cast)
- // The target type is encoded as the return type.
- Out << "?B";
- break;
-
- case DeclarationName::CXXOperatorName:
- mangleOperatorName(Name.getCXXOverloadedOperator());
- break;
-
- case DeclarationName::CXXLiteralOperatorName:
- // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
- assert(false && "Don't know how to mangle literal operators yet!");
- break;
-
- case DeclarationName::CXXUsingDirective:
- assert(false && "Can't mangle a using directive name!");
- break;
- }
-}
-
-void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
- bool NoFunction) {
- // <postfix> ::= <unqualified-name> [<postfix>]
- // ::= <template-postfix> <template-args> [<postfix>]
- // ::= <template-param>
- // ::= <substitution> [<postfix>]
-
- if (!DC) return;
-
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
-
- if (DC->isTranslationUnit())
- return;
-
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
- llvm::SmallString<64> Name;
- Context.mangleBlock(GlobalDecl(), BD, Name);
- Out << Name << '@';
- return manglePostfix(DC->getParent(), NoFunction);
- }
-
- if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
- return;
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else {
- mangleUnqualifiedName(cast<NamedDecl>(DC));
- manglePostfix(DC->getParent(), NoFunction);
- }
-}
-
-void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
- switch (OO) {
- // ?0 # constructor
- // ?1 # destructor
- // <operator-name> ::= ?2 # new
- case OO_New: Out << "?2"; break;
- // <operator-name> ::= ?3 # delete
- case OO_Delete: Out << "?3"; break;
- // <operator-name> ::= ?4 # =
- case OO_Equal: Out << "?4"; break;
- // <operator-name> ::= ?5 # >>
- case OO_GreaterGreater: Out << "?5"; break;
- // <operator-name> ::= ?6 # <<
- case OO_LessLess: Out << "?6"; break;
- // <operator-name> ::= ?7 # !
- case OO_Exclaim: Out << "?7"; break;
- // <operator-name> ::= ?8 # ==
- case OO_EqualEqual: Out << "?8"; break;
- // <operator-name> ::= ?9 # !=
- case OO_ExclaimEqual: Out << "?9"; break;
- // <operator-name> ::= ?A # []
- case OO_Subscript: Out << "?A"; break;
- // ?B # conversion
- // <operator-name> ::= ?C # ->
- case OO_Arrow: Out << "?C"; break;
- // <operator-name> ::= ?D # *
- case OO_Star: Out << "?D"; break;
- // <operator-name> ::= ?E # ++
- case OO_PlusPlus: Out << "?E"; break;
- // <operator-name> ::= ?F # --
- case OO_MinusMinus: Out << "?F"; break;
- // <operator-name> ::= ?G # -
- case OO_Minus: Out << "?G"; break;
- // <operator-name> ::= ?H # +
- case OO_Plus: Out << "?H"; break;
- // <operator-name> ::= ?I # &
- case OO_Amp: Out << "?I"; break;
- // <operator-name> ::= ?J # ->*
- case OO_ArrowStar: Out << "?J"; break;
- // <operator-name> ::= ?K # /
- case OO_Slash: Out << "?K"; break;
- // <operator-name> ::= ?L # %
- case OO_Percent: Out << "?L"; break;
- // <operator-name> ::= ?M # <
- case OO_Less: Out << "?M"; break;
- // <operator-name> ::= ?N # <=
- case OO_LessEqual: Out << "?N"; break;
- // <operator-name> ::= ?O # >
- case OO_Greater: Out << "?O"; break;
- // <operator-name> ::= ?P # >=
- case OO_GreaterEqual: Out << "?P"; break;
- // <operator-name> ::= ?Q # ,
- case OO_Comma: Out << "?Q"; break;
- // <operator-name> ::= ?R # ()
- case OO_Call: Out << "?R"; break;
- // <operator-name> ::= ?S # ~
- case OO_Tilde: Out << "?S"; break;
- // <operator-name> ::= ?T # ^
- case OO_Caret: Out << "?T"; break;
- // <operator-name> ::= ?U # |
- case OO_Pipe: Out << "?U"; break;
- // <operator-name> ::= ?V # &&
- case OO_AmpAmp: Out << "?V"; break;
- // <operator-name> ::= ?W # ||
- case OO_PipePipe: Out << "?W"; break;
- // <operator-name> ::= ?X # *=
- case OO_StarEqual: Out << "?X"; break;
- // <operator-name> ::= ?Y # +=
- case OO_PlusEqual: Out << "?Y"; break;
- // <operator-name> ::= ?Z # -=
- case OO_MinusEqual: Out << "?Z"; break;
- // <operator-name> ::= ?_0 # /=
- case OO_SlashEqual: Out << "?_0"; break;
- // <operator-name> ::= ?_1 # %=
- case OO_PercentEqual: Out << "?_1"; break;
- // <operator-name> ::= ?_2 # >>=
- case OO_GreaterGreaterEqual: Out << "?_2"; break;
- // <operator-name> ::= ?_3 # <<=
- case OO_LessLessEqual: Out << "?_3"; break;
- // <operator-name> ::= ?_4 # &=
- case OO_AmpEqual: Out << "?_4"; break;
- // <operator-name> ::= ?_5 # |=
- case OO_PipeEqual: Out << "?_5"; break;
- // <operator-name> ::= ?_6 # ^=
- case OO_CaretEqual: Out << "?_6"; break;
- // ?_7 # vftable
- // ?_8 # vbtable
- // ?_9 # vcall
- // ?_A # typeof
- // ?_B # local static guard
- // ?_C # string
- // ?_D # vbase destructor
- // ?_E # vector deleting destructor
- // ?_F # default constructor closure
- // ?_G # scalar deleting destructor
- // ?_H # vector constructor iterator
- // ?_I # vector destructor iterator
- // ?_J # vector vbase constructor iterator
- // ?_K # virtual displacement map
- // ?_L # eh vector constructor iterator
- // ?_M # eh vector destructor iterator
- // ?_N # eh vector vbase constructor iterator
- // ?_O # copy constructor closure
- // ?_P<name> # udt returning <name>
- // ?_Q # <unknown>
- // ?_R0 # RTTI Type Descriptor
- // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
- // ?_R2 # RTTI Base Class Array
- // ?_R3 # RTTI Class Hierarchy Descriptor
- // ?_R4 # RTTI Complete Object Locator
- // ?_S # local vftable
- // ?_T # local vftable constructor closure
- // <operator-name> ::= ?_U # new[]
- case OO_Array_New: Out << "?_U"; break;
- // <operator-name> ::= ?_V # delete[]
- case OO_Array_Delete: Out << "?_V"; break;
-
- case OO_Conditional:
- assert(false && "Don't know how to mangle ?:");
- break;
-
- case OO_None:
- case NUM_OVERLOADED_OPERATORS:
- assert(false && "Not an overloaded operator");
- break;
- }
-}
-
-void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
- // <source name> ::= <identifier> @
- Out << II->getName() << '@';
-}
-
-void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Buffer;
- MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
- Out << Buffer;
-}
-
-void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
- bool IsMember) {
- // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
- // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
- // 'I' means __restrict (32/64-bit).
- // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
- // keyword!
- // <base-cvr-qualifiers> ::= A # near
- // ::= B # near const
- // ::= C # near volatile
- // ::= D # near const volatile
- // ::= E # far (16-bit)
- // ::= F # far const (16-bit)
- // ::= G # far volatile (16-bit)
- // ::= H # far const volatile (16-bit)
- // ::= I # huge (16-bit)
- // ::= J # huge const (16-bit)
- // ::= K # huge volatile (16-bit)
- // ::= L # huge const volatile (16-bit)
- // ::= M <basis> # based
- // ::= N <basis> # based const
- // ::= O <basis> # based volatile
- // ::= P <basis> # based const volatile
- // ::= Q # near member
- // ::= R # near const member
- // ::= S # near volatile member
- // ::= T # near const volatile member
- // ::= U # far member (16-bit)
- // ::= V # far const member (16-bit)
- // ::= W # far volatile member (16-bit)
- // ::= X # far const volatile member (16-bit)
- // ::= Y # huge member (16-bit)
- // ::= Z # huge const member (16-bit)
- // ::= 0 # huge volatile member (16-bit)
- // ::= 1 # huge const volatile member (16-bit)
- // ::= 2 <basis> # based member
- // ::= 3 <basis> # based const member
- // ::= 4 <basis> # based volatile member
- // ::= 5 <basis> # based const volatile member
- // ::= 6 # near function (pointers only)
- // ::= 7 # far function (pointers only)
- // ::= 8 # near method (pointers only)
- // ::= 9 # far method (pointers only)
- // ::= _A <basis> # based function (pointers only)
- // ::= _B <basis> # based function (far?) (pointers only)
- // ::= _C <basis> # based method (pointers only)
- // ::= _D <basis> # based method (far?) (pointers only)
- // ::= _E # block (Clang)
- // <basis> ::= 0 # __based(void)
- // ::= 1 # __based(segment)?
- // ::= 2 <name> # __based(name)
- // ::= 3 # ?
- // ::= 4 # ?
- // ::= 5 # not really based
- if (!IsMember) {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'A';
- else
- Out << 'B';
- } else {
- if (!Quals.hasConst())
- Out << 'C';
- else
- Out << 'D';
- }
- } else {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'Q';
- else
- Out << 'R';
- } else {
- if (!Quals.hasConst())
- Out << 'S';
- else
- Out << 'T';
- }
- }
-
- // FIXME: For now, just drop all extension qualifiers on the floor.
-}
-
-void MicrosoftCXXNameMangler::mangleType(QualType T) {
- // Only operate on the canonical type!
- T = getASTContext().getCanonicalType(T);
-
- Qualifiers Quals = T.getLocalQualifiers();
- if (Quals) {
- // We have to mangle these now, while we still have enough information.
- // <pointer-cvr-qualifiers> ::= P # pointer
- // ::= Q # const pointer
- // ::= R # volatile pointer
- // ::= S # const volatile pointer
- if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- if (!Quals.hasVolatile())
- Out << 'Q';
- else {
- if (!Quals.hasConst())
- Out << 'R';
- else
- Out << 'S';
- }
- } else
- // Just emit qualifiers like normal.
- // NB: When we mangle a pointer/reference type, and the pointee
- // type has no qualifiers, the lack of qualifier gets mangled
- // in there.
- mangleQualifiers(Quals, false);
- } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- Out << 'P';
- }
- switch (T->getTypeClass()) {
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
-return;
-#define TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
-break;
-#include "clang/AST/TypeNodes.def"
- }
-}
-
-void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
- // <type> ::= <builtin-type>
- // <builtin-type> ::= X # void
- // ::= C # signed char
- // ::= D # char
- // ::= E # unsigned char
- // ::= F # short
- // ::= G # unsigned short (or wchar_t if it's not a builtin)
- // ::= H # int
- // ::= I # unsigned int
- // ::= J # long
- // ::= K # unsigned long
- // L # <none>
- // ::= M # float
- // ::= N # double
- // ::= O # long double (__float80 is mangled differently)
- // ::= _D # __int8 (yup, it's a distinct type in MSVC)
- // ::= _E # unsigned __int8
- // ::= _F # __int16
- // ::= _G # unsigned __int16
- // ::= _H # __int32
- // ::= _I # unsigned __int32
- // ::= _J # long long, __int64
- // ::= _K # unsigned long long, __int64
- // ::= _L # __int128
- // ::= _M # unsigned __int128
- // ::= _N # bool
- // _O # <array in parameter>
- // ::= _T # __float80 (Intel)
- // ::= _W # wchar_t
- // ::= _Z # __float80 (Digital Mars)
- switch (T->getKind()) {
- case BuiltinType::Void: Out << 'X'; break;
- case BuiltinType::SChar: Out << 'C'; break;
- case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
- case BuiltinType::UChar: Out << 'E'; break;
- case BuiltinType::Short: Out << 'F'; break;
- case BuiltinType::UShort: Out << 'G'; break;
- case BuiltinType::Int: Out << 'H'; break;
- case BuiltinType::UInt: Out << 'I'; break;
- case BuiltinType::Long: Out << 'J'; break;
- case BuiltinType::ULong: Out << 'K'; break;
- case BuiltinType::Float: Out << 'M'; break;
- case BuiltinType::Double: Out << 'N'; break;
- // TODO: Determine size and mangle accordingly
- case BuiltinType::LongDouble: Out << 'O'; break;
- // TODO: __int8 and friends
- case BuiltinType::LongLong: Out << "_J"; break;
- case BuiltinType::ULongLong: Out << "_K"; break;
- case BuiltinType::Int128: Out << "_L"; break;
- case BuiltinType::UInt128: Out << "_M"; break;
- case BuiltinType::Bool: Out << "_N"; break;
- case BuiltinType::WChar: Out << "_W"; break;
-
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- assert(false &&
- "Overloaded and dependent types shouldn't get to name mangling");
- break;
- case BuiltinType::UndeducedAuto:
- assert(0 && "Should not see undeduced auto here");
- break;
- case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
- case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
- case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
-
- case BuiltinType::Char16:
- case BuiltinType::Char32:
- case BuiltinType::NullPtr:
- assert(false && "Don't know how to mangle this type");
- break;
- }
-}
-
-// <type> ::= <function-type>
-void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
- // Structors only appear in decls, so at this point we know it's not a
- // structor type.
- // I'll probably have mangleType(MemberPointerType) call the mangleType()
- // method directly.
- mangleType(T, NULL, false, false);
-}
-void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
- llvm_unreachable("Can't mangle K&R function prototypes");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
- const FunctionDecl *D,
- bool IsStructor,
- bool IsInstMethod) {
- // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
- // <return-type> <argument-list> <throw-spec>
- const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
-
- // If this is a C++ instance method, mangle the CVR qualifiers for the
- // this pointer.
- if (IsInstMethod)
- mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
-
- mangleCallingConvention(T);
-
- // <return-type> ::= <type>
- // ::= @ # structors (they have no declared return type)
- if (IsStructor)
- Out << '@';
- else
- mangleType(Proto->getResultType());
-
- // <argument-list> ::= X # void
- // ::= <type>+ @
- // ::= <type>* Z # varargs
- if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
- Out << 'X';
- } else {
- if (D) {
- // If we got a decl, use the "types-as-written" to make sure arrays
- // get mangled right.
- for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
- ParmEnd = D->param_end();
- Parm != ParmEnd; ++Parm)
- mangleType((*Parm)->getTypeSourceInfo()->getType());
- } else {
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- mangleType(*Arg);
- }
- // <builtin-type> ::= Z # ellipsis
- if (Proto->isVariadic())
- Out << 'Z';
- else
- Out << '@';
- }
-
- mangleThrowSpecification(Proto);
-}
-
-void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
- // <function-class> ::= A # private: near
- // ::= B # private: far
- // ::= C # private: static near
- // ::= D # private: static far
- // ::= E # private: virtual near
- // ::= F # private: virtual far
- // ::= G # private: thunk near
- // ::= H # private: thunk far
- // ::= I # protected: near
- // ::= J # protected: far
- // ::= K # protected: static near
- // ::= L # protected: static far
- // ::= M # protected: virtual near
- // ::= N # protected: virtual far
- // ::= O # protected: thunk near
- // ::= P # protected: thunk far
- // ::= Q # public: near
- // ::= R # public: far
- // ::= S # public: static near
- // ::= T # public: static far
- // ::= U # public: virtual near
- // ::= V # public: virtual far
- // ::= W # public: thunk near
- // ::= X # public: thunk far
- // ::= Y # global near
- // ::= Z # global far
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- switch (MD->getAccess()) {
- default:
- case AS_private:
- if (MD->isStatic())
- Out << 'C';
- else if (MD->isVirtual())
- Out << 'E';
- else
- Out << 'A';
- break;
- case AS_protected:
- if (MD->isStatic())
- Out << 'K';
- else if (MD->isVirtual())
- Out << 'M';
- else
- Out << 'I';
- break;
- case AS_public:
- if (MD->isStatic())
- Out << 'S';
- else if (MD->isVirtual())
- Out << 'U';
- else
- Out << 'Q';
- }
- } else
- Out << 'Y';
-}
-void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
- // <calling-convention> ::= A # __cdecl
- // ::= B # __export __cdecl
- // ::= C # __pascal
- // ::= D # __export __pascal
- // ::= E # __thiscall
- // ::= F # __export __thiscall
- // ::= G # __stdcall
- // ::= H # __export __stdcall
- // ::= I # __fastcall
- // ::= J # __export __fastcall
- // The 'export' calling conventions are from a bygone era
- // (*cough*Win16*cough*) when functions were declared for export with
- // that keyword. (It didn't actually export them, it just made them so
- // that they could be in a DLL and somebody from another module could call
- // them.)
- switch (T->getCallConv()) {
- case CC_Default:
- case CC_C: Out << 'A'; break;
- case CC_X86Pascal: Out << 'C'; break;
- case CC_X86ThisCall: Out << 'E'; break;
- case CC_X86StdCall: Out << 'G'; break;
- case CC_X86FastCall: Out << 'I'; break;
- }
-}
-void MicrosoftCXXNameMangler::mangleThrowSpecification(
- const FunctionProtoType *FT) {
- // <throw-spec> ::= Z # throw(...) (default)
- // ::= @ # throw() or __declspec/__attribute__((nothrow))
- // ::= <type>+
- // NOTE: Since the Microsoft compiler ignores throw specifications, they are
- // all actually mangled as 'Z'. (They're ignored because their associated
- // functionality isn't implemented, and probably never will be.)
- Out << 'Z';
-}
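Assembling <function-class>, <calling-convention>, return type, argument list, and throw spec, two global functions mangle as follows (a sketch consistent with the grammar above):

    void f();         // ?f@@YAXXZ   Y = global, A = __cdecl, X = void return,
                      //             X = empty argument list, Z = throw(...)
    int g(int, ...);  // ?g@@YAHHZZ  H = int (return and argument), Z = varargs,
                      //             final Z = throw spec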
-
-void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
- assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
-}
-
-// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
-// <union-type> ::= T <name>
-// <struct-type> ::= U <name>
-// <class-type> ::= V <name>
-// <enum-type> ::= W <size> <name>
-void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
- switch (T->getDecl()->getTagKind()) {
- case TTK_Union:
- Out << 'T';
- break;
- case TTK_Struct:
- Out << 'U';
- break;
- case TTK_Class:
- Out << 'V';
- break;
- case TTK_Enum:
- Out << 'W';
- Out << getASTContext().getTypeSizeInChars(
- cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
- break;
- }
- mangleName(T->getDecl());
-}
-
-// <type> ::= <array-type>
-// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as global
-// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as param
-// It's supposed to be the other way around, but for some strange reason, it
-// isn't. Today this behavior is retained for the sole purpose of backwards
-// compatibility.
-void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
- // This isn't a recursive mangling, so now we have to do it all in this
- // one call.
- if (IsGlobal)
- Out << 'P';
- else
- Out << 'Q';
- mangleExtraDimensions(T->getElementType());
-}
-void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
- llvm::SmallVector<llvm::APInt, 3> Dimensions;
- for (;;) {
- if (ElementTy->isConstantArrayType()) {
- const ConstantArrayType *CAT =
- static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
- Dimensions.push_back(CAT->getSize());
- ElementTy = CAT->getElementType();
- } else if (ElementTy->isVariableArrayType()) {
- assert(false && "Don't know how to mangle VLAs!");
- } else if (ElementTy->isDependentSizedArrayType()) {
- // The dependent expression has to be folded into a constant (TODO).
- assert(false && "Don't know how to mangle dependent-sized arrays!");
- } else if (ElementTy->isIncompleteArrayType()) continue;
- else break;
- }
- mangleQualifiers(ElementTy.getQualifiers(), false);
- // If there are any additional dimensions, mangle them now.
- if (Dimensions.size() > 0) {
- Out << 'Y';
- // <dimension-count> ::= <number> # number of extra dimensions
- mangleNumber(Dimensions.size());
- for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
- mangleNumber(Dimensions[Dim].getLimitedValue());
- }
- }
- mangleType(ElementTy.getLocalUnqualifiedType());
-}
-
-// <type> ::= <pointer-to-member-type>
-// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
-// <class name> <type>
-void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
- QualType PointeeType = T->getPointeeType();
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
- Out << '8';
- mangleName(cast<RecordType>(T->getClass())->getDecl());
- mangleType(FPT, NULL, false, true);
- } else {
- mangleQualifiers(PointeeType.getQualifiers(), true);
- mangleName(cast<RecordType>(T->getClass())->getDecl());
- mangleType(PointeeType.getLocalUnqualifiedType());
- }
-}
-void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
- assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
-}
-
-// <type> ::= <pointer-type>
-// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
- QualType PointeeTy = T->getPointeeType();
- if (PointeeTy->isArrayType()) {
- // Pointers to arrays are mangled like arrays.
- mangleExtraDimensions(T->getPointeeType());
- } else if (PointeeTy->isFunctionType()) {
- // Function pointers are special.
- Out << '6';
- mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
- NULL, false, false);
- } else {
- if (!PointeeTy.hasQualifiers())
- // Lack of qualifiers is mangled as 'A'.
- Out << 'A';
- mangleType(PointeeTy);
- }
-}
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
- // Object pointers never have qualifiers.
- Out << 'A';
- mangleType(T->getPointeeType());
-}
-
-// <type> ::= <reference-type>
-// <reference-type> ::= A <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
- Out << 'A';
- QualType PointeeTy = T->getPointeeType();
- if (!PointeeTy.hasQualifiers())
- // Lack of qualifiers is mangled as 'A'.
- Out << 'A';
- mangleType(PointeeTy);
-}
-
-void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
- assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
- assert(false && "Don't know how to mangle ComplexTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
- assert(false && "Don't know how to mangle VectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
- assert(false && "Don't know how to mangle ExtVectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
- assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
- // ObjC interfaces have structs underlying them.
- Out << 'U';
- mangleName(T->getDecl());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
- // We don't allow overloading by different protocol qualification,
- // so mangling them isn't necessary.
- mangleType(T->getBaseType());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
- Out << "_E";
- mangleType(T->getPointeeType());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
- assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
- assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
- assert(false && "Don't know how to mangle DependentNameTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(
- const DependentTemplateSpecializationType *T) {
- assert(false &&
- "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
- assert(false && "Don't know how to mangle TypeOfTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
- assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
- assert(false && "Don't know how to mangle DecltypeTypes yet!");
-}
-
-void MicrosoftMangleContext::mangleName(const NamedDecl *D,
- llvm::SmallVectorImpl<char> &Name) {
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
-
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- getASTContext().getSourceManager(),
- "Mangling declaration");
+ // ==== Notes on array cookies =========
+ //
+ // MSVC seems to only use cookies when the class has a destructor; a
+ // two-argument usual array deallocation function isn't sufficient.
+ //
+ // For example, this code prints "100" and "1":
+ // struct A {
+ // char x;
+ // void *operator new[](size_t sz) {
+ // printf("%u\n", sz);
+ // return malloc(sz);
+ // }
+ // void operator delete[](void *p, size_t sz) {
+ // printf("%u\n", sz);
+ // free(p);
+ // }
+ // };
+ // int main() {
+ // A *p = new A[100];
+ // delete[] p;
+ // }
+ // Whereas it prints "104" and "104" if you give A a destructor.
+};
- MicrosoftCXXNameMangler Mangler(*this, Name);
- return Mangler.mangle(D);
-}
-void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle thunks!");
-}
-void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
- CXXDtorType Type,
- const ThisAdjustment &,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle destructor thunks!");
-}
-void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle guard variables!");
-}
-void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle virtual tables!");
-}
-void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &) {
- llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
-}
-void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
- int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &) {
- llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
-}
-void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle RTTI!");
-}
-void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle RTTI names!");
-}
-void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle constructors!");
-}
-void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle destructors!");
}
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 6d9d2770c7c8..894502864464 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -71,6 +71,19 @@ namespace {
/// (because these can be defined in declspecs).
virtual void HandleTagDeclDefinition(TagDecl *D) {
Builder->UpdateCompletedType(D);
+
+ // In C++, we may have member functions that need to be emitted at this
+ // point.
+ if (Ctx->getLangOptions().CPlusPlus && !D->isDependentContext()) {
+ for (DeclContext::decl_iterator M = D->decls_begin(),
+ MEnd = D->decls_end();
+ M != MEnd; ++M)
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
+ if (Method->isThisDeclarationADefinition() &&
+ (Method->hasAttr<UsedAttr>() ||
+ Method->hasAttr<ConstructorAttr>()))
+ Builder->EmitTopLevelDecl(Method);
+ }
}
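The case this handles is an in-class definition that is never referenced but must still be emitted once the class is complete, e.g. (hypothetical example):

    struct S {
      // Never called, but the 'used' attribute forces emission as soon as
      // the definition of S is completed.
      __attribute__((used)) void keep() {}
    };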
virtual void HandleTranslationUnit(ASTContext &Ctx) {
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 4d221d4e657e..d74b3f32d954 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -18,7 +18,6 @@
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -75,7 +74,8 @@ void ABIArgInfo::dump() const {
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
- << " Byal=" << getIndirectByVal();
+ << " Byal=" << getIndirectByVal()
+ << " Realign=" << getIndirectRealign();
break;
case Expand:
OS << "Expand";
@@ -330,12 +330,47 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+/// UseX86_MMXType - Return true if this is an MMX type that should use the
+/// special x86_mmx type.
+bool UseX86_MMXType(const llvm::Type *IRType) {
+ // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
+ // special x86_mmx type.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
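Concrete inputs for this predicate:

    //   <2 x i32>, <4 x i16>, <8 x i8> -> true   (64-bit integer vectors)
    //   <1 x i64>                      -> false  (scalar size is already 64)
    //   <2 x float>                    -> false  (element type not integer)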
+
+static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) {
+ if (Constraint == "y" && Ty->isVectorTy())
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ return Ty;
+}
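In source terms, this fires for 64-bit vector operands constrained to the MMX register class, e.g. (a sketch using GCC-style inline asm):

    typedef int v2si __attribute__((vector_size(8)));
    v2si add2(v2si a, v2si b) {
      v2si r;
      // "y" pins the operands to MMX registers; the vector operand type is
      // rewritten to x86_mmx when the asm operand types are built.
      __asm__("paddd %2, %0" : "=y"(r) : "0"(a), "y"(b));
      return r;
    }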
+
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
+ static const unsigned MinABIStackAlignInBytes = 4;
+
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
@@ -349,6 +384,9 @@ class X86_32ABIInfo : public ABIInfo {
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+ /// \brief Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -385,6 +423,13 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
+
+ const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
};
}
@@ -547,17 +592,70 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (isRecordWithSSEVectorType(Context, i->getType()))
+ return true;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ QualType FT = i->getType();
+
+ if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ return true;
+
+ if (isRecordWithSSEVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // If the alignment is less than or equal to the minimum ABI alignment,
+ // just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (isRecordWithSSEVectorType(getContext(), Ty))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
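
The three-way decision above, restated as a standalone sketch (hypothetical helper; hasSSEVector stands in for isRecordWithSSEVectorType):

    unsigned stackAlignSketch(unsigned typeAlignBytes, bool isDarwinVectorABI,
                              bool hasSSEVector) {
      const unsigned MinABIStackAlignInBytes = 4;
      if (typeAlignBytes <= MinABIStackAlignInBytes)
        return 0;                       // default; backend handles 4-byte slots
      if (!isDarwinVectorABI)
        return MinABIStackAlignInBytes; // non-Darwin stack stays 4-byte aligned
      return hasSSEVector ? 16 : MinABIStackAlignInBytes;
    }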
+
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
if (!ByVal)
return ABIArgInfo::getIndirect(0, false);
- // Compute the byval alignment. We trust the back-end to honor the
- // minimum ABI alignment for byval, to make cleaner IR.
- const unsigned MinABIAlign = 4;
- unsigned Align = getContext().getTypeAlign(Ty) / 8;
- if (Align > MinABIAlign)
- return ABIArgInfo::getIndirect(Align);
- return ABIArgInfo::getIndirect(0);
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(0);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ if (StackAlign < TypeAlign)
+ return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
+ /*Realign=*/true);
+
+ return ABIArgInfo::getIndirect(StackAlign);
}
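
How that plays out for a vector-bearing struct (GNU vector extensions; illustrative only):

    typedef float v4sf __attribute__((vector_size(16)));
    struct Wide { v4sf lane; };  // natural alignment: 16 bytes
    // Darwin: stack align 16 >= type align 16 -> byval, align 16
    // elsewhere: stack align capped at 4 < 16 -> byval, align 4, Realign=true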
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
@@ -599,11 +697,18 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
}
-
+
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ if (UseX86_MMXType(IRType)) {
+ ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
+ AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
+ return AAI;
+ }
+
return ABIArgInfo::getDirect();
}
-
-
+
+
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -755,7 +860,8 @@ class X86_64ABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &neededInt,
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned &neededInt,
unsigned &neededSSE) const;
public:
@@ -768,9 +874,14 @@ public:
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
-class WinX86_64ABIInfo : public X86_64ABIInfo {
+class WinX86_64ABIInfo : public ABIInfo {
+
+ ABIArgInfo classify(QualType Ty) const;
+
public:
- WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -799,6 +910,13 @@ public:
return false;
}
+
+ const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
};
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -817,7 +935,7 @@ public:
const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
-
+
// 0-15 are the 16 integer registers.
// 16 is %rip.
AssignToArrayRange(Builder, Address, Eight8, 0, 16);
@@ -1065,7 +1183,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
@@ -1264,7 +1382,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
- unsigned BaseOffset = (unsigned)Layout.getBaseClassOffset(Base);
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
@@ -1443,7 +1561,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
unsigned HiAlign = TD.getABITypeAlignment(Hi);
unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
-
+
// To handle this, we have to increase the size of the low part so that the
// second element will start at an 8 byte offset. We can't increase the size
// of the second element because it might make us access off the end of the
@@ -1459,11 +1577,11 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
Lo = llvm::Type::getInt64Ty(Lo->getContext());
}
}
-
- const llvm::StructType *Result =
+
+ const llvm::StructType *Result =
llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
-
-
+
+
// Verify that the second element is at an 8-byte offset.
assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
"Invalid x86-64 argument pair!");
@@ -1592,7 +1710,7 @@ classifyReturnType(QualType RetTy) const {
}
break;
}
-
+
// If a high part was specified, merge it together with the low part. It is
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
@@ -1665,11 +1783,18 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
- case SSE:
+ case SSE: {
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ if (Hi != NoClass || !UseX86_MMXType(IRType))
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ else
+ // This is an MMX type. Treat it as such.
+ ResType = llvm::Type::getX86_MMXTy(getVMContext());
+
++neededSSE;
- ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
break;
}
+ }
const llvm::Type *HighPart = 0;
switch (Hi) {
@@ -1719,7 +1844,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
-
+
return ABIArgInfo::getDirect(ResType);
}
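
Outcome of the pairing for a mixed pair of eightbytes (a sketch of the SysV x86-64 classification, not code from the patch):

    struct DL { double d; long long l; };
    // eightbyte 0 classifies SSE (double), eightbyte 1 INTEGER (i64), so the
    // argument is passed Direct as the IR aggregate { double, i64 }, with the
    // second element verified to start at offset 8.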
@@ -1965,76 +2090,48 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
-llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-//===----------------------------------------------------------------------===//
-// PIC16 ABI Implementation
-//===----------------------------------------------------------------------===//
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
-namespace {
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
-class PIC16ABIInfo : public ABIInfo {
-public:
- PIC16ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ uint64_t Size = getContext().getTypeSize(Ty);
- ABIArgInfo classifyReturnType(QualType RetTy) const;
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
+ RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ // FIXME: mingw64-gcc emits 128-bit struct as i128
+ if (Size <= 128 &&
+ (Size & (Size - 1)) == 0)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- PIC16TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PIC16ABIInfo(CGT)) {}
-};
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ return ABIArgInfo::getDirect();
}
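
Illustrative classifications under these rules (Win64 type sizes assumed; records with non-trivial destructors or copy constructors always go indirect):

    struct S1  { char c; };          // 1 byte  -> direct i8
    struct S4  { int i; };           // 4 bytes -> direct i32
    struct S8  { void *p; };         // 8 bytes -> direct i64
    struct S12 { int a, b, c; };     // 12 bytes, not a power of two -> indirect
    struct S16 { long long a, b; };  // 16 bytes -> direct i128 here
                                     // (the mingw64-gcc FIXME above)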
-ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else {
- return ABIArgInfo::getDirect();
- }
-}
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty) const {
- return ABIArgInfo::getDirect();
+ QualType RetTy = FI.getReturnType();
+ FI.getReturnInfo() = classify(RetTy);
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classify(it->type);
}
-llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -2046,18 +2143,16 @@ llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
- uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
-
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
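
The rounding is the point of this override: every Win64 vararg slot is 8 bytes wide. A standalone sketch of the cursor arithmetic (hypothetical helper name):

    #include <cstdint>

    // Returns the next cursor; *out receives the current argument's address.
    char *vaNextSketch(char *cur, std::uint64_t typeSizeBytes, void **out) {
      *out = cur;                                            // "ap.cur"
      std::uint64_t step = (typeSizeBytes + 7) & ~std::uint64_t(7);
      return cur + step;                                     // "ap.next"
    }
    // vaNextSketch(ap, sizeof(int), &p) advances by 8, not 4.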
-
// PowerPC-32
namespace {
@@ -2213,6 +2308,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ //
// FIXME: This is kind of nasty... but there isn't much choice because the ARM
// backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
@@ -2321,6 +2418,10 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return ABIArgInfo::getIndirect(0);
+
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -2407,21 +2508,6 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2502,6 +2588,116 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
}
//===----------------------------------------------------------------------===//
+// MBlaze ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MBlazeABIInfo : public ABIInfo {
+public:
+ MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
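
Effect of the two classifiers on typical signatures (illustrative):

    short half(short a);             // sub-32-bit: Extend, argument and return
    int whole(int a);                // register-sized: Direct
    struct Pair { int x, y; };
    struct Pair make(int x, int y);  // aggregate: passed and returned indirect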
+
+void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M)
+ const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::CallingConv::ID CC = llvm::CallingConv::C;
+ if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
+ CC = llvm::CallingConv::MBLAZE_INTR;
+ else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
+ CC = llvm::CallingConv::MBLAZE_SVOL;
+
+ if (CC != llvm::CallingConv::C) {
+ // Handle the 'interrupt_handler' and 'save_volatiles' attributes:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set the handler's calling convention.
+ F->setCallingConv(CC);
+
+ // Step 2: Keep the handler out of line so the convention is honored.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ // Step 3: Emit _interrupt_handler alias.
+ if (CC == llvm::CallingConv::MBLAZE_INTR)
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "_interrupt_handler", GV, &M.getModule());
+}
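
User-level view of this hook, assuming the attribute spelling named in the comment above (a sketch):

    void __attribute__((interrupt_handler)) timer_isr(void) {
      // emitted with the MBLAZE_INTR calling convention, marked noinline,
      // and aliased in the module as "_interrupt_handler"
    }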
+
+//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2534,8 +2730,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
// Step 3: Emit ISR vector alias.
unsigned Num = attr->getNumber() + 0xffe0;
new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
- "vector_" +
- llvm::LowercaseString(llvm::utohexstr(Num)),
+ "vector_" + llvm::Twine::utohexstr(Num),
GV, &M.getModule());
}
}
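
For a concrete alias name: the MSP430 'interrupt' attribute number is added to 0xffe0 and printed as lowercase hex, so vector 2 yields "vector_ffe2" (a sketch, assuming clang's MSP430 attribute spelling):

    void __attribute__((interrupt(2))) port1_isr(void) {
      // alias emitted: "vector_" + utohexstr(0xffe0 + 2) == "vector_ffe2"
    }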
@@ -2621,15 +2816,15 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo =
new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
- case llvm::Triple::pic16:
- return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo(Types));
-
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::systemz:
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
+ case llvm::Triple::mblaze:
+ return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
+
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
@@ -2644,6 +2839,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
new X86_32TargetCodeGenInfo(Types, false, true));
@@ -2655,7 +2851,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::x86_64:
switch (Triple.getOS()) {
case llvm::Triple::Win32:
- case llvm::Triple::MinGW64:
+ case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
default:
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 9d4cf1610308..4f59eb619e9d 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -15,8 +15,11 @@
#ifndef CLANG_CODEGEN_TARGETINFO_H
#define CLANG_CODEGEN_TARGETINFO_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
class GlobalValue;
+ class Type;
class Value;
}
@@ -102,6 +105,12 @@ namespace clang {
llvm::Value *Address) const {
return Address;
}
+
+ virtual const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return Ty;
+ }
};
}