Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                 |   21
-rw-r--r--  lib/CodeGen/BackendUtil.cpp           |  143
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |  387
-rw-r--r--  lib/CodeGen/CGBlocks.h                |   47
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             | 2692
-rw-r--r--  lib/CodeGen/CGCXX.cpp                 |   29
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |   43
-rw-r--r--  lib/CodeGen/CGCXXABI.h                |   34
-rw-r--r--  lib/CodeGen/CGCall.cpp                |  621
-rw-r--r--  lib/CodeGen/CGCall.h                  |  154
-rw-r--r--  lib/CodeGen/CGClass.cpp               |  362
-rw-r--r--  lib/CodeGen/CGCleanup.cpp             |   74
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           |  946
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |   42
-rw-r--r--  lib/CodeGen/CGDecl.cpp                |  243
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             |  220
-rw-r--r--  lib/CodeGen/CGException.cpp           |  145
-rw-r--r--  lib/CodeGen/CGException.h             |   56
-rw-r--r--  lib/CodeGen/CGExpr.cpp                | 1017
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |  580
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             |  296
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |   66
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |  879
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          |  335
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |  836
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             |  296
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             |  851
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |   74
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h           |   28
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                |   59
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |  183
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |   62
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp         |   49
-rw-r--r--  lib/CodeGen/CGVTT.cpp                 |   21
-rw-r--r--  lib/CodeGen/CGVTables.cpp             |   40
-rw-r--r--  lib/CodeGen/CGValue.h                 |   84
-rw-r--r--  lib/CodeGen/CMakeLists.txt            |    3
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp         |  107
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |  180
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         |  407
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         |  617
-rw-r--r--  lib/CodeGen/CodeGenModule.h           |  115
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp           |    6
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h             |    4
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |   44
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |  109
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         |  334
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp       |    7
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp         |   17
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            |  882
-rw-r--r--  lib/CodeGen/TargetInfo.h              |    5
51 files changed, 10455 insertions, 4397 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 138123816c3e..2853bc80a5d4 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -42,7 +42,8 @@ namespace clang {
/// type, or by coercing to another specified type stored in
/// 'CoerceToType'). If an offset is specified (in UIntData), then the
/// argument passed is offset by some number of bytes in the memory
- /// representation.
+ /// representation. If the type stored in "PaddingType" is non-null, a
+ /// dummy argument of that type is emitted before the real argument.
Direct,
/// Extend - Valid only for integer argument types. Same as 'direct'
@@ -69,19 +70,22 @@ namespace clang {
private:
Kind TheKind;
llvm::Type *TypeData;
+ llvm::Type *PaddingType; // Currently allowed only for Direct.
unsigned UIntData;
bool BoolData0;
bool BoolData1;
- ABIArgInfo(Kind K, llvm::Type *TD=0,
- unsigned UI=0, bool B0 = false, bool B1 = false)
- : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
+ ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
+ bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ : TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
+ BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0) {
- return ABIArgInfo(Direct, T, Offset);
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
+ llvm::Type *Padding = 0) {
+ return ABIArgInfo(Direct, T, Offset, false, false, Padding);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
return ABIArgInfo(Extend, T, 0);
@@ -113,6 +117,11 @@ namespace clang {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
return UIntData;
}
+
+ llvm::Type *getPaddingType() const {
+ return PaddingType;
+ }
+
llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
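
A note on the ABIArgInfo change above: the new PaddingType hook lets a target ABI ask CGCall to emit an unused "dummy" argument in front of the real one, e.g. to fix a value's register alignment. A minimal sketch of how a target's classifier might use it; the target class and the needsRegisterPadding() predicate are illustrative assumptions, not part of this patch:

    // Hypothetical target classifier using the new padding hook.
    class MyTargetABIInfo : public ABIInfo {
      ABIArgInfo classifyArgumentType(QualType Ty) const {
        llvm::Type *Padding = 0;
        if (needsRegisterPadding(Ty))   // assumed target-specific predicate
          Padding = llvm::Type::getInt32Ty(getVMContext());
        // When Padding is non-null, CGCall emits a dummy argument of that
        // type immediately before the real (direct) argument.
        return ABIArgInfo::getDirect(/*T=*/0, /*Offset=*/0, Padding);
      }
    };
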
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index b9e3ed9edd19..2f44711b607b 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -43,7 +43,7 @@ namespace {
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
const CodeGenOptions &CodeGenOpts;
- const TargetOptions &TargetOpts;
+ const clang::TargetOptions &TargetOpts;
const LangOptions &LangOpts;
Module *TheModule;
@@ -87,7 +87,8 @@ private:
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
- const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M)
: Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
@@ -105,6 +106,11 @@ public:
}
+static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCAPElimPass());
+}
+
static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
if (Builder.OptLevel > 0)
PM.add(createObjCARCExpandPass());
@@ -115,6 +121,16 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
+static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createAddressSanitizerPass());
+}
+
+static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createThreadSanitizerPass());
+}
+
void EmitAssemblyHelper::CreatePasses() {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
@@ -138,10 +154,26 @@ void EmitAssemblyHelper::CreatePasses() {
if (LangOpts.ObjCAutoRefCount) {
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
+ addObjCARCAPElimPass);
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addObjCARCOptPass);
}
-
+
+ if (LangOpts.AddressSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addAddressSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addAddressSanitizerPass);
+ }
+
+ if (LangOpts.ThreadSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addThreadSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addThreadSanitizerPass);
+ }
+
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
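
The extension points above are what wire the sanitizers into the standard pass pipeline, including at -O0 via EP_EnabledOnOptLevel0. From the user's side, LangOpts.AddressSanitizer and LangOpts.ThreadSanitizer corresponded to driver flags of roughly this spelling in this era (an assumption; they were later folded into -fsanitize=address/thread):

    clang -O1 -faddress-sanitizer foo.c
    clang -O1 -fthread-sanitizer  foo.c
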
@@ -164,7 +196,11 @@ void EmitAssemblyHelper::CreatePasses() {
}
case CodeGenOptions::OnlyAlwaysInlining:
// Respect always_inline.
- PMBuilder.Inliner = createAlwaysInlinerPass();
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
break;
}
@@ -206,35 +242,6 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// being gross, this is also totally broken if we ever care about
// concurrency.
- // Set frame pointer elimination mode.
- if (!CodeGenOpts.DisableFPElim) {
- llvm::NoFramePointerElim = false;
- llvm::NoFramePointerElimNonLeaf = false;
- } else if (CodeGenOpts.OmitLeafFramePointer) {
- llvm::NoFramePointerElim = false;
- llvm::NoFramePointerElimNonLeaf = true;
- } else {
- llvm::NoFramePointerElim = true;
- llvm::NoFramePointerElimNonLeaf = true;
- }
-
- // Set float ABI type.
- if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
- llvm::FloatABIType = llvm::FloatABI::Soft;
- else if (CodeGenOpts.FloatABI == "hard")
- llvm::FloatABIType = llvm::FloatABI::Hard;
- else {
- assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
- llvm::FloatABIType = llvm::FloatABI::Default;
- }
-
- llvm::LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
- llvm::NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
- llvm::NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
- NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- llvm::UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
- llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
-
TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
@@ -255,7 +262,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
CM = llvm::CodeModel::Default;
}
- std::vector<const char *> BackendArgs;
+ SmallVector<const char *, 16> BackendArgs;
BackendArgs.push_back("clang"); // Fake program name.
if (!CodeGenOpts.DebugPass.empty()) {
BackendArgs.push_back("-debug-pass");
@@ -273,7 +280,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
BackendArgs.push_back("-global-merge=false");
BackendArgs.push_back(0);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
- const_cast<char **>(&BackendArgs[0]));
+ BackendArgs.data());
std::string FeaturesStr;
if (TargetOpts.Features.size()) {
@@ -296,8 +303,52 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
RM = llvm::Reloc::DynamicNoPIC;
}
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ llvm::TargetOptions Options;
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = true;
+ } else {
+ Options.NoFramePointerElim = true;
+ Options.NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
+ Options.FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Options.FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ Options.FloatABIType = llvm::FloatABI::Default;
+ }
+
+ Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UseSoftFloat = CodeGenOpts.SoftFloat;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.RealignStack = CodeGenOpts.StackRealignment;
+ Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
+ Options.TrapFuncName = CodeGenOpts.TrapFuncName;
+ Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
- FeaturesStr, RM, CM);
+ FeaturesStr, Options,
+ RM, CM, OptLevel);
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
@@ -305,18 +356,19 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCSaveTempLabels(true);
if (CodeGenOpts.NoDwarf2CFIAsm)
TM->setMCUseCFI(false);
+ if (!CodeGenOpts.NoDwarfDirectoryAsm)
+ TM->setMCUseDwarfDirectory(true);
if (CodeGenOpts.NoExecStack)
TM->setMCNoExecStack(true);
// Create the code generator passes.
PassManager *PM = getCodeGenPasses();
- CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
- switch (CodeGenOpts.OptimizationLevel) {
- default: break;
- case 0: OptLevel = CodeGenOpt::None; break;
- case 3: OptLevel = CodeGenOpt::Aggressive; break;
- }
+ // Add LibraryInfo.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ PM->add(TLI);
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
@@ -331,10 +383,11 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// Add ObjC ARC final-cleanup optimizations. This is done as part of the
// "codegen" passes so that it isn't run multiple times when there is
// inlining happening.
- if (LangOpts.ObjCAutoRefCount)
+ if (LangOpts.ObjCAutoRefCount &&
+ CodeGenOpts.OptimizationLevel > 0)
PM->add(createObjCARCContractPass());
- if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT,
/*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
return false;
@@ -397,7 +450,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const CodeGenOptions &CGOpts,
- const TargetOptions &TOpts,
+ const clang::TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M,
BackendAction Action, raw_ostream *OS) {
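
The larger theme in the BackendUtil.cpp changes above is the migration from mutable llvm:: globals (NoFramePointerElim, FloatABIType, UnsafeFPMath, ...) to a per-TargetMachine llvm::TargetOptions bundle passed to createTargetMachine(). A minimal sketch of the new call shape, assuming a TripleStr already in hand; error handling and most options omitted:

    // Per-module codegen flags now travel in llvm::TargetOptions.
    llvm::TargetOptions Options;
    Options.NoFramePointerElim = true;            // keep frame pointers
    Options.FloatABIType = llvm::FloatABI::Soft;  // e.g. soft-float ABI

    std::string Error;
    const llvm::Target *T =
        llvm::TargetRegistry::lookupTarget(TripleStr, Error);
    llvm::TargetMachine *TM =
        T->createTargetMachine(TripleStr, /*CPU=*/"", /*Features=*/"",
                               Options, llvm::Reloc::Default,
                               llvm::CodeModel::Default,
                               llvm::CodeGenOpt::Default);
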
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 969495376642..27bb4ef5d1af 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -25,13 +25,15 @@
using namespace clang;
using namespace CodeGen;
-CGBlockInfo::CGBlockInfo(const BlockExpr *blockExpr, const char *N)
- : Name(N), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), StructureType(0), Block(blockExpr) {
+CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ DominatingIP(0) {
- // Skip asm prefix, if any.
- if (Name && Name[0] == '\01')
- ++Name;
+ // Skip asm prefix, if any. 'name' is usually taken directly from
+ // the mangled name of the enclosing function.
+ if (!name.empty() && name[0] == '\01')
+ name = name.substr(1);
}
// Anchor the vtable to this translation unit.
@@ -90,7 +92,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
// GC layout.
- if (C.getLangOptions().ObjC1)
+ if (C.getLangOpts().ObjC1)
elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
elements.push_back(llvm::Constant::getNullValue(i8p));
@@ -213,6 +215,7 @@ static bool isSafeForCXXConstantCapture(QualType type) {
/// acceptable because we make no promises about address stability of
/// captured variables.
static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
const VarDecl *var) {
QualType type = var->getType();
@@ -224,7 +227,7 @@ static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
// Except that any class member declared mutable can be
// modified, any attempt to modify a const object during its
// lifetime results in undefined behavior.
- if (CGM.getLangOptions().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type))
return 0;
// If the variable doesn't have any initializer (shouldn't this be
@@ -233,7 +236,7 @@ static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
const Expr *init = var->getInit();
if (!init) return 0;
- return CGM.EmitConstantExpr(init, var->getType());
+ return CGM.EmitConstantInit(*var, CGF);
}
/// Get the low bit of a nonzero character count. This is the
@@ -276,7 +279,8 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
/// Compute the layout of the given block. Attempts to lay the block
/// out with minimal space requirements.
-static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
+static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
+ CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
@@ -340,7 +344,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// Otherwise, build a layout chunk with the size and alignment of
// the declaration.
- if (llvm::Constant *constant = tryCaptureAsConstant(CGM, variable)) {
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
continue;
}
@@ -370,7 +374,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
info.HasCXXObject = true;
// And so do types with destructors.
- } else if (CGM.getLangOptions().CPlusPlus) {
+ } else if (CGM.getLangOpts().CPlusPlus) {
if (const CXXRecordDecl *record =
variable->getType()->getAsCXXRecordDecl()) {
if (!record->hasTrivialDestructor()) {
@@ -380,13 +384,15 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
}
}
- CharUnits size = C.getTypeSizeInChars(variable->getType());
+ QualType VT = variable->getType();
+ CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
+
maxFieldAlign = std::max(maxFieldAlign, align);
llvm::Type *llvmType =
- CGM.getTypes().ConvertTypeForMem(variable->getType());
-
+ CGM.getTypes().ConvertTypeForMem(VT);
+
layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
}
@@ -481,18 +487,146 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
+/// Enter the scope of a block. This should be run at the entrance to
+/// a full-expression so that the block's cleanups are pushed at the
+/// right place in the stack.
+static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
+ // Allocate the block info and place it at the head of the list.
+ CGBlockInfo &blockInfo =
+ *new CGBlockInfo(block, CGF.CurFn->getName());
+ blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
+ CGF.FirstBlockInfo = &blockInfo;
+
+ // Compute information about the layout, etc., of this block,
+ // pushing cleanups as necessary.
+ computeBlockInfo(CGF.CGM, &CGF, blockInfo);
+
+ // Nothing else to do if it can be global.
+ if (blockInfo.CanBeGlobal) return;
+
+ // Make the allocation for the block.
+ blockInfo.Address =
+ CGF.CreateTempAlloca(blockInfo.StructureType, "block");
+ blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // If there are cleanups to emit, enter them (but inactive).
+ if (!blockInfo.NeedsCopyDispose) return;
+
+ // Walk through the captures (in order) and find the ones not
+ // captured by constant.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ // Ignore __block captures; there's nothing special in the
+ // on-stack block that we need to do for them.
+ if (ci->isByRef()) continue;
+
+ // Ignore variables that are constant-captured.
+ const VarDecl *variable = ci->getVariable();
+ CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind =
+ variable->getType().isDestructedType();
+ if (dtorKind == QualType::DK_none) continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so we don't need to worry about that.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ // GEP down to the address.
+ llvm::Value *addr = CGF.Builder.CreateStructGEP(blockInfo.Address,
+ capture.getIndex());
+
+ // We can use that GEP as the dominating IP.
+ if (!blockInfo.DominatingIP)
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+
+ CleanupKind cleanupKind = InactiveNormalCleanup;
+ bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = InactiveNormalAndEHCleanup;
+
+ CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ destroyer, useArrayEHCleanup);
+
+ // Remember where that cleanup was.
+ capture.setCleanup(CGF.EHStack.stable_begin());
+ }
+}
+
+/// Enter a full-expression with a non-trivial number of objects to
+/// clean up. This is in this file because, at the moment, the only
+/// kind of cleanup object is a BlockDecl*.
+void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
+ assert(E->getNumObjects() != 0);
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
+ for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
+ i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
+ enterBlockScope(*this, *i);
+ }
+}
+
+/// Find the layout for the given block in a linked list and remove it.
+static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
+ const BlockDecl *block) {
+ while (true) {
+ assert(head && *head);
+ CGBlockInfo *cur = *head;
+
+ // If this is the block we're looking for, splice it out of the list.
+ if (cur->getBlockDecl() == block) {
+ *head = cur->NextBlockInfo;
+ return cur;
+ }
+
+ head = &cur->NextBlockInfo;
+ }
+}
+
+/// Destroy a chain of block layouts.
+void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
+ assert(head && "destroying an empty chain");
+ do {
+ CGBlockInfo *cur = head;
+ head = cur->NextBlockInfo;
+ delete cur;
+ } while (head != 0);
+}
+
/// Emit a block literal expression in the current function.
llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
- std::string Name = CurFn->getName();
- CGBlockInfo blockInfo(blockExpr, Name.c_str());
+ // If the block has no captures, we won't have a pre-computed
+ // layout for it.
+ if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ return EmitBlockLiteral(blockInfo);
+ }
- // Compute information about the layout, etc., of this block.
- computeBlockInfo(CGM, blockInfo);
+ // Find the block info for this block and take ownership of it.
+ OwningPtr<CGBlockInfo> blockInfo;
+ blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
+ blockExpr->getBlockDecl()));
- // Using that metadata, generate the actual block function.
+ blockInfo->BlockExpression = blockExpr;
+ return EmitBlockLiteral(*blockInfo);
+}
+
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
+ // Using the computed layout, generate the actual block function.
+ bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
llvm::Constant *blockFn
= CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
- CurFuncDecl, LocalDeclMap);
+ CurFuncDecl, LocalDeclMap,
+ isLambdaConv);
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
@@ -507,11 +641,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- llvm::Type *intTy = ConvertType(getContext().IntTy);
-
- llvm::AllocaInst *blockAddr =
- CreateTempAlloca(blockInfo.StructureType, "block");
- blockAddr->setAlignment(blockInfo.BlockAlign.getQuantity());
+ llvm::AllocaInst *blockAddr = blockInfo.Address;
+ assert(blockAddr && "block has no address!");
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
@@ -521,9 +652,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Initialize the block literal.
Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
- Builder.CreateStore(llvm::ConstantInt::get(intTy, flags.getBitMask()),
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
- Builder.CreateStore(llvm::ConstantInt::get(intTy, 0),
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, 0),
Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
"block.invoke"));
@@ -570,6 +701,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
"block.capture.addr");
+ } else if (blockDecl->isConversionFromLambda()) {
+ // The lambda capture in a lambda's conversion-to-block-pointer is
+ // special; we'll simply emit it directly.
+ src = 0;
} else {
// This is a [[type]]*.
src = LocalDeclMap[variable];
@@ -591,7 +726,19 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If we have a copy constructor, evaluate that into the block field.
} else if (const Expr *copyExpr = ci->getCopyExpr()) {
- EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ if (blockDecl->isConversionFromLambda()) {
+ // If we have a lambda conversion, emit the expression
+ // directly into the block instead.
+ CharUnits Align = getContext().getTypeAlignInChars(type);
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(blockField, Align, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(copyExpr, Slot);
+ } else {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ }
// If it's a reference variable, copy the reference into the block field.
} else if (type->isReferenceType()) {
@@ -606,45 +753,23 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// We use one of these or the other depending on whether the
// reference is nested.
- DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
- SourceLocation());
- BlockDeclRefExpr nested(const_cast<VarDecl*>(variable), type,
- VK_LValue, SourceLocation(), /*byref*/ false);
-
- Expr *declRef =
- (ci->isNested() ? static_cast<Expr*>(&nested) : &notNested);
+ DeclRefExpr declRef(const_cast<VarDecl*>(variable),
+ /*refersToEnclosing*/ ci->isNested(), type,
+ VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
- declRef, VK_RValue);
+ &declRef, VK_RValue);
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
MakeAddrLValue(blockField, type,
- getContext().getDeclAlign(variable)
- .getQuantity()),
+ getContext().getDeclAlign(variable)),
/*captured by init*/ false);
}
- // Push a destructor if necessary. The semantics for when this
- // actually gets run are really obscure.
+ // Activate the cleanup if layout pushed one.
if (!ci->isByRef()) {
- switch (QualType::DestructionKind dtorKind = type.isDestructedType()) {
- case QualType::DK_none:
- break;
-
- // Block captures count as local values and have imprecise semantics.
- // They also can't be arrays, so need to worry about that.
- case QualType::DK_objc_strong_lifetime: {
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = &destroyARCStrongImprecise;
- pushDestroy(getCleanupKind(dtorKind), blockField, type,
- *destroyer, /*useEHCleanupForArray*/ false);
- break;
- }
-
- case QualType::DK_objc_weak_lifetime:
- case QualType::DK_cxx_destructor:
- pushDestroy(dtorKind, blockField, type);
- break;
- }
+ EHScopeStack::stable_iterator cleanup = capture.getCleanup();
+ if (cleanup.isValid())
+ ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
}
}
@@ -744,11 +869,11 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
llvm::Value *Func = Builder.CreateLoad(FuncPtr);
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FuncTy);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
// Cast the function pointer to the right type.
- llvm::Type *BlockFTy =
- CGM.getTypes().GetFunctionType(FnInfo, false);
+ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
@@ -798,10 +923,11 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
llvm::Constant *
CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
const char *name) {
- CGBlockInfo blockInfo(blockExpr, name);
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
+ blockInfo.BlockExpression = blockExpr;
// Compute information about the layout, etc., of this block.
- computeBlockInfo(*this, blockInfo);
+ computeBlockInfo(*this, 0, blockInfo);
// Using that metadata, generate the actual block function.
llvm::Constant *blockFn;
@@ -809,7 +935,8 @@ CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
blockInfo,
- 0, LocalDeclMap);
+ 0, LocalDeclMap,
+ false);
}
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
@@ -863,7 +990,8 @@ llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &blockInfo,
const Decl *outerFnDecl,
- const DeclMapTy &ldm) {
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// Check if we should generate debug info for this block function.
@@ -901,16 +1029,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
args.push_back(*i);
// Create the function declaration.
- const FunctionProtoType *fnType =
- cast<FunctionProtoType>(blockInfo.getBlockExpr()->getFunctionType());
+ const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
const CGFunctionInfo &fnInfo =
- CGM.getTypes().getFunctionInfo(fnType->getResultType(), args,
- fnType->getExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(fnType->getResultType(), args,
+ fnType->getExtInfo(),
+ fnType->isVariadic());
if (CGM.ReturnTypeUsesSRet(fnInfo))
blockInfo.UsesStret = true;
- llvm::FunctionType *fnLLVMType =
- CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
+ llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
MangleBuffer name;
CGM.getBlockMangledName(GD, name, blockDecl);
@@ -976,12 +1103,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
LocalDeclMap[variable] = alloca;
}
- // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+ // Save a spot to insert the debug information for all the DeclRefExprs.
llvm::BasicBlock *entry = Builder.GetInsertBlock();
llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
--entry_ptr;
- EmitStmt(blockDecl->getBody());
+ if (IsLambdaConversionToBlock)
+ EmitLambdaBlockInvokeBody();
+ else
+ EmitStmt(blockDecl->getBody());
// Remember where we were...
llvm::BasicBlock *resume = Builder.GetInsertBlock();
@@ -990,7 +1120,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
++entry_ptr;
Builder.SetInsertPoint(entry, entry_ptr);
- // Emit debug information for all the BlockDeclRefDecls.
+ // Emit debug information for all the DeclRefExprs.
// FIXME: also for 'this'
if (CGDebugInfo *DI = getDebugInfo()) {
for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
@@ -1052,11 +1182,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
args.push_back(&srcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
- llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1076,7 +1208,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
SC_Static,
SC_None,
false,
- true);
+ false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1119,7 +1251,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures:
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
// Don't generate special copy logic for a captured object
@@ -1167,11 +1299,13 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
args.push_back(&srcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
// FIXME: We'd like to make these mergeable by content, with
// internal linkage.
- llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1189,7 +1323,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1229,7 +1363,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
// Don't generate special dispose logic for a captured object
@@ -1340,15 +1474,23 @@ public:
// Do a "move" by copying the value and then zeroing out the old
// variable.
- llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
+ value->setAlignment(Alignment.getQuantity());
+
llvm::Value *null =
llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
- CGF.Builder.CreateStore(value, destField);
- CGF.Builder.CreateStore(null, srcField);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
+ store->setAlignment(Alignment.getQuantity());
+
+ store = CGF.Builder.CreateStore(null, srcField);
+ store->setAlignment(Alignment.getQuantity());
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::Value *value = CGF.Builder.CreateLoad(field);
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
CGF.EmitARCRelease(value, /*precise*/ false);
}
@@ -1358,6 +1500,39 @@ public:
}
};
+/// Emits the copy/dispose helpers for an ARC __block __strong
+/// variable that's of block-pointer type.
+class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do the copy with objc_retainBlock; that's all that
+ // _Block_object_assign would do anyway, and we'd have to pass the
+ // right arguments to make sure it doesn't get no-op'ed.
+ llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
+ oldValue->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 2 is distinguishable from all pointers and byref flags
+ id.AddInteger(2);
+ }
+};
+
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
@@ -1404,10 +1579,12 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
args.push_back(&src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
// FIXME: We'd like to make these mergeable by content, with
// internal linkage.
@@ -1424,7 +1601,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
@@ -1472,10 +1649,12 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
args.push_back(&src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
// FIXME: We'd like to make these mergeable by content, with
// internal linkage.
@@ -1493,7 +1672,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsDispose()) {
@@ -1565,7 +1744,7 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// If we have lifetime, that dominates.
if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
- assert(getLangOptions().ObjCAutoRefCount);
+ assert(getLangOpts().ObjCAutoRefCount);
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("impossible");
@@ -1584,13 +1763,10 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// ARC __strong __block variables need to be retained.
case Qualifiers::OCL_Strong:
- // Block-pointers need to be _Block_copy'ed, so we let the
- // runtime be in charge. But we can't use the code below
- // because we don't want to set BYREF_CALLER, which will
- // just make the runtime ignore us.
+ // Block pointers need to be copied, and there's no direct
+ // transfer possible.
if (type->isBlockPointerType()) {
- BlockFieldFlags flags = BLOCK_FIELD_IS_BLOCK;
- ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment);
return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
// Otherwise, we transfer ownership of the retain from the stack
@@ -1674,7 +1850,8 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// int32_t __size;
types.push_back(Int32Ty);
- bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
+ bool HasCopyAndDispose =
+ (Ty->isObjCRetainableType()) || getContext().getBlockVarCopyInits(D);
if (HasCopyAndDispose) {
/// void *__copy_helper;
types.push_back(Int8PtrTy);
@@ -1701,7 +1878,7 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ llvm::Type *Ty = Int8Ty;
// FIXME: We need a sema error for alignment larger than the minimum of
// the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
@@ -1803,7 +1980,7 @@ namespace {
/// to be done externally.
void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
// We don't enter this cleanup if we're in pure-GC mode.
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
return;
EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
@@ -1812,7 +1989,7 @@ void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
/// Adjust the declaration of something from the blocks API.
static void configureBlocksRuntimeObject(CodeGenModule &CGM,
llvm::Constant *C) {
- if (!CGM.getLangOptions().BlocksRuntimeOptional) return;
+ if (!CGM.getLangOpts().BlocksRuntimeOptional) return;
llvm::GlobalValue *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
if (GV->isDeclaration() &&
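
The block-info bookkeeping added above (enterBlockScope, findAndRemoveBlockInfo, destroyBlockInfos) keeps per-block layouts on an intrusive singly-linked list headed by CGF.FirstBlockInfo, so a layout computed at the start of a full-expression can be claimed later when the BlockExpr itself is emitted. The splice uses the classic pointer-to-pointer walk, shown below on a generic node type (Node stands in for CGBlockInfo and its NextBlockInfo link):

    #include <cassert>

    struct Node { int key; Node *next; };

    // Find the node with 'key' and unlink it without tracking a separate
    // 'previous' pointer: 'head' always points at the link slot to patch.
    static Node *findAndRemove(Node **head, int key) {
      while (true) {
        assert(head && *head && "entry must be on the list");
        Node *cur = *head;
        if (cur->key == key) {
          *head = cur->next;   // splice out by rewriting the link slot
          return cur;
        }
        head = &cur->next;     // advance to the next link slot
      }
    }
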
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 6e71c1fdc041..095cfdb259c3 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -23,6 +23,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "CodeGenFunction.h"
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
@@ -128,13 +129,14 @@ inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
class CGBlockInfo {
public:
/// Name - The name of the block, kind of.
- const char *Name;
+ llvm::StringRef Name;
/// The field index of 'this' within the block, if there is one.
unsigned CXXThisIndex;
class Capture {
uintptr_t Data;
+ EHScopeStack::stable_iterator Cleanup;
public:
bool isIndex() const { return (Data & 1) != 0; }
@@ -144,6 +146,14 @@ public:
assert(isConstant());
return reinterpret_cast<llvm::Value*>(Data);
}
+ EHScopeStack::stable_iterator getCleanup() const {
+ assert(isIndex());
+ return Cleanup;
+ }
+ void setCleanup(EHScopeStack::stable_iterator cleanup) {
+ assert(isIndex());
+ Cleanup = cleanup;
+ }
static Capture makeIndex(unsigned index) {
Capture v;
@@ -158,9 +168,6 @@ public:
}
};
- /// The mapping of allocated indexes within the block.
- llvm::DenseMap<const VarDecl*, Capture> Captures;
-
/// CanBeGlobal - True if the block can be global, i.e. it has
/// no non-constant captures.
bool CanBeGlobal : 1;
@@ -176,22 +183,44 @@ public:
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ llvm::AllocaInst *Address;
llvm::StructType *StructureType;
- const BlockExpr *Block;
+ const BlockDecl *Block;
+ const BlockExpr *BlockExpression;
CharUnits BlockSize;
CharUnits BlockAlign;
+ /// An instruction which dominates the full-expression that the
+ /// block is inside.
+ llvm::Instruction *DominatingIP;
+
+ /// The next block in the block-info chain. Invalid if this block
+ /// info is not part of the CGF's block-info chain, which is true
+ /// if it corresponds to a global block or a block whose expression
+ /// has been encountered.
+ CGBlockInfo *NextBlockInfo;
+
const Capture &getCapture(const VarDecl *var) const {
- llvm::DenseMap<const VarDecl*, Capture>::const_iterator
+ return const_cast<CGBlockInfo*>(this)->getCapture(var);
+ }
+ Capture &getCapture(const VarDecl *var) {
+ llvm::DenseMap<const VarDecl*, Capture>::iterator
it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
return it->second;
}
- const BlockDecl *getBlockDecl() const { return Block->getBlockDecl(); }
- const BlockExpr *getBlockExpr() const { return Block; }
+ const BlockDecl *getBlockDecl() const { return Block; }
+ const BlockExpr *getBlockExpr() const {
+ assert(BlockExpression);
+ assert(BlockExpression->getBlockDecl() == Block);
+ return BlockExpression;
+ }
- CGBlockInfo(const BlockExpr *blockExpr, const char *Name);
+ CGBlockInfo(const BlockDecl *blockDecl, llvm::StringRef Name);
};
} // end namespace CodeGen
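
Capture, above, packs two cases into a single uintptr_t: a struct-field index stored as (index << 1) | 1, or a constant llvm::Value* stored directly, relying on pointer alignment to keep the low bit clear. A self-contained sketch of the same tagging idiom, with void* standing in for llvm::Value*:

    #include <cassert>
    #include <cstdint>

    struct TaggedCapture {
      uintptr_t Data;
      bool isIndex() const { return (Data & 1) != 0; }
      unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
      void *getConstant() const {
        assert(!isIndex());
        return reinterpret_cast<void *>(Data);
      }
      static TaggedCapture makeIndex(unsigned i) {
        TaggedCapture v;
        v.Data = (uintptr_t(i) << 1) | 1;      // low bit tags "index"
        return v;
      }
      static TaggedCapture makeConstant(void *p) {
        TaggedCapture v;
        v.Data = reinterpret_cast<uintptr_t>(p);  // low bit clear (aligned)
        return v;
      }
    };
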
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index ec0ca424220e..e30b5136ba40 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -16,7 +16,6 @@
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -176,7 +175,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
- if (E->Evaluate(Result, CGM.getContext()) &&
+ if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
@@ -215,7 +214,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
- case Builtin::BI__builtin_abs: {
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
@@ -228,6 +229,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
@@ -237,12 +239,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue);
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
@@ -252,7 +256,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue);
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -268,7 +273,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue),
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
+ Builder.getTrue()),
llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
@@ -534,7 +540,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BImemcpy:
@@ -542,7 +549,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
return RValue::get(Address);
}
@@ -557,7 +566,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Dest = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
return RValue::get(Dest);
}
@@ -581,7 +592,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Dest = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
return RValue::get(Dest);
}
@@ -590,7 +603,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BImemset:
@@ -599,7 +614,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BI__builtin___memset_chk: {
@@ -614,7 +630,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
return RValue::get(Address);
}
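
The recurring change in the hunks above threads a real pointee alignment into the memcpy/memmove/memset intrinsics instead of the conservative 1, via the GetPointeeAlignment() helper that this diff promotes to a CodeGenFunction method (see the end of this section). An illustrative caller showing the payoff:

    // With the hunks above, clang can emit the memcpy intrinsic with
    // alignment 4 here (both pointees are 4-byte-aligned ints) instead
    // of the old conservative alignment of 1.
    void copy_ints(int *dst, const int *src, unsigned n) {
      __builtin_memcpy(dst, src, n * sizeof(int));
    }
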
@@ -925,12 +942,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- llvm::Type *ElLLVMTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
- llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
return RValue::get(0);
@@ -948,10 +966,186 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
case Builtin::BI__atomic_thread_fence:
- case Builtin::BI__atomic_signal_fence: {
+ case Builtin::BI__atomic_signal_fence:
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence: {
llvm::SynchronizationScope Scope;
- if (BuiltinID == Builtin::BI__atomic_signal_fence)
+ if (BuiltinID == Builtin::BI__atomic_signal_fence ||
+ BuiltinID == Builtin::BI__c11_atomic_signal_fence)
Scope = llvm::SingleThread;
else
Scope = llvm::CrossThread;
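The two fence flavors differ only in scope: a signal fence orders memory operations against a signal handler on the same thread, so it lowers to a singlethread fence. A sketch using the new __c11_* spellings:

    void fences() {
      __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);  // fence seq_cst
      __c11_atomic_signal_fence(__ATOMIC_ACQUIRE);  // fence singlethread acquire
    }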
@@ -1145,8 +1339,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
- llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
- if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+ llvm::Type *RetTy = VoidTy;
+ if (!BuiltinRetType->isVoidType())
+ RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
@@ -1181,30 +1376,37 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
return EmitPPCBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::hexagon:
+ return EmitHexagonBuiltinExpr(BuiltinID, E);
default:
return 0;
}
}
-static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
- switch (type) {
- default: break;
- case 0:
- case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
- case 6:
- case 7:
- case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
- case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
- case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
- case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
- };
- return 0;
+static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags) {
+ int IsQuad = TypeFlags.isQuad();
+ switch (TypeFlags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ case NeonTypeFlags::Float16:
+ return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
+ case NeonTypeFlags::Float32:
+ return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
+ }
+ llvm_unreachable("Invalid NeonTypeFlags element type!");
}
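NeonTypeFlags (from clang/Basic/TargetBuiltins.h) replaces the old magic-number 'type' parameter: the element type is a proper enum, with signedness and the quad (128-bit) bit carried as separate flags. The lane counts follow from the register width, as this stand-alone restatement of the '<< IsQuad' rule shows:

    // Runnable restatement of the lane-count rule used in GetNeonType.
    unsigned neonLaneCount(unsigned eltBits, bool isQuad) {
      // A D register holds 64 bits, a Q register 128: quad doubles lanes.
      return (64 / eltBits) << (isQuad ? 1 : 0);
    }
    // neonLaneCount(8, false)  == 8   -> <8 x i8>
    // neonLaneCount(8, true)   == 16  -> <16 x i8>
    // neonLaneCount(32, true)  == 4   -> <4 x i32> or <4 x float>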
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
- SmallVector<Constant*, 16> Indices(nElts, C);
- Value* SV = llvm::ConstantVector::get(Indices);
+ Value* SV = llvm::ConstantVector::getSplat(nElts, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
@@ -1224,26 +1426,28 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
- ConstantInt *CI = cast<ConstantInt>(V);
- int SV = CI->getSExtValue();
+ int SV = cast<ConstantInt>(V)->getSExtValue();
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
- SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
- return llvm::ConstantVector::get(CV);
+ return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer. Skip over implicit
/// casts.
-static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
+unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
unsigned Align = 1;
// Check if the type is a pointer. The implicit cast operand might not be.
while (Addr->getType()->isPointerType()) {
QualType PtTy = Addr->getType()->getPointeeType();
- unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
- if (NewA > Align)
- Align = NewA;
+
+ // Can't get alignment of incomplete types.
+ if (!PtTy->isIncompleteType()) {
+ unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+ }
// If the address is an implicit cast, repeat with the cast operand.
if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
@@ -1252,7 +1456,14 @@ static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
}
break;
}
- return llvm::ConstantInt::get(CGF.Int32Ty, Align);
+ return Align;
+}
+
+/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+/// the alignment of the type referenced by the pointer. Skip over implicit
+/// casts. Return the alignment as an llvm::Value.
+Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
+ return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
}
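Splitting the helper lets the new load/store paths, which need a raw unsigned for setAlignment, share logic with the intrinsic-operand path. The incomplete-type guard above matters because NEON builtins take 'const void *': the implicit cast's pointee has no alignment, so the loop now skips it and keeps the alignment of the pointer type the user actually wrote. The user-visible effect, assuming an ARM target with <arm_neon.h>:

    #include <arm_neon.h>

    uint8x16_t load_bytes(const uint8_t *p) {
      // The written pointee is 1-byte-aligned uint8_t, so the generated
      // arm.neon.vld1 call carries alignment 1, not the vector type's 16.
      return vld1q_u8(p);
    }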
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
@@ -1349,9 +1560,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
- Ty = llvm::Type::getFloatTy(getLLVMContext());
+ Ty = FloatTy;
else
- Ty = llvm::Type::getDoubleTy(getLLVMContext());
+ Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
@@ -1363,14 +1574,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
// Determine the type of this overloaded NEON intrinsic.
- unsigned type = Result.getZExtValue();
- bool usgn = type & 0x08;
- bool quad = type & 0x10;
- bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
- (void)poly; // Only used in assert()s.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
+ llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return 0;
@@ -1429,34 +1638,40 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(F, Ops, "vcnt");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
- assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f16_v builtin");
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
return EmitNeonCall(F, Ops, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_f32_f16: {
- assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f32_f16 builtin");
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
return EmitNeonCall(F, Ops, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_f32_v:
- case ARM::BI__builtin_neon_vcvtq_f32_v: {
+ case ARM::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(getLLVMContext(), 4, quad);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- }
case ARM::BI__builtin_neon_vcvt_s32_v:
case ARM::BI__builtin_neon_vcvt_u32_v:
case ARM::BI__builtin_neon_vcvtq_s32_v:
case ARM::BI__builtin_neon_vcvtq_u32_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(getLLVMContext(), 4, quad));
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
- Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { FloatTy, Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
+ : Intrinsic::arm_neon_vcvtfxs2fp;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -1464,8 +1679,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
- Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { Ty, FloatTy };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
+ : Intrinsic::arm_neon_vcvtfp2fxs;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -1491,30 +1709,35 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1_lane_v:
- case ARM::BI__builtin_neon_vld1q_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
case ARM::BI__builtin_neon_vld1_dup_v:
case ARM::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
- Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
}
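vld1_dup is now an ordinary scalar load carrying the pointee's alignment, an insert into lane 0 of an undef vector, and a broadcast via EmitNeonSplat. At the source level (again assuming an ARM target):

    #include <arm_neon.h>

    int32x2_t dup_load(const int32_t *p) {
      // Lowers to: load i32 with align 4, insertelement into lane 0,
      // then a splat shufflevector of lane 0.
      return vld1_dup_s32(p);
    }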
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1523,7 +1746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1532,7 +1755,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1543,7 +1766,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1555,7 +1778,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1568,7 +1791,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1584,15 +1807,15 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::arm_neon_vld2;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ Int = Intrinsic::arm_neon_vld3;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ Int = Intrinsic::arm_neon_vld4;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1603,10 +1826,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::arm_neon_vld2lane;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ Int = Intrinsic::arm_neon_vld3lane;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ Int = Intrinsic::arm_neon_vld4lane;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
@@ -1619,7 +1842,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
@@ -1656,12 +1879,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vmul_v:
case ARM::BI__builtin_neon_vmulq_v:
- assert(poly && "vmul builtin only supported for polynomial types");
+ assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
- Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
@@ -1852,43 +2075,48 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1_lane_v:
- case ARM::BI__builtin_neon_vst1q_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ StoreInst *St = Builder.CreateStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return St;
+ }
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
@@ -1937,8 +2165,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
- Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ Indices.push_back(Builder.getInt32(i+vi));
+ Indices.push_back(Builder.getInt32(i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
@@ -1990,7 +2218,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
llvm::Value *CodeGenFunction::
-BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
+BuildVector(ArrayRef<llvm::Value*> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
@@ -1999,7 +2227,7 @@ BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
- std::vector<llvm::Constant*> CstOps;
+ SmallVector<llvm::Constant*, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
@@ -2010,8 +2238,7 @@ BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- Result = Builder.CreateInsertElement(Result, Ops[i],
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i));
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
return Result;
}
@@ -2043,61 +2270,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
- case X86::BI__builtin_ia32_pslldi128:
- case X86::BI__builtin_ia32_psllqi128:
- case X86::BI__builtin_ia32_psllwi128:
- case X86::BI__builtin_ia32_psradi128:
- case X86::BI__builtin_ia32_psrawi128:
- case X86::BI__builtin_ia32_psrldi128:
- case X86::BI__builtin_ia32_psrlqi128:
- case X86::BI__builtin_ia32_psrlwi128: {
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
- llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
- Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
- Ops[1], Zero, "insert");
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
- const char *name = 0;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported shift intrinsic!");
- case X86::BI__builtin_ia32_pslldi128:
- name = "pslldi";
- ID = Intrinsic::x86_sse2_psll_d;
- break;
- case X86::BI__builtin_ia32_psllqi128:
- name = "psllqi";
- ID = Intrinsic::x86_sse2_psll_q;
- break;
- case X86::BI__builtin_ia32_psllwi128:
- name = "psllwi";
- ID = Intrinsic::x86_sse2_psll_w;
- break;
- case X86::BI__builtin_ia32_psradi128:
- name = "psradi";
- ID = Intrinsic::x86_sse2_psra_d;
- break;
- case X86::BI__builtin_ia32_psrawi128:
- name = "psrawi";
- ID = Intrinsic::x86_sse2_psra_w;
- break;
- case X86::BI__builtin_ia32_psrldi128:
- name = "psrldi";
- ID = Intrinsic::x86_sse2_psrl_d;
- break;
- case X86::BI__builtin_ia32_psrlqi128:
- name = "psrlqi";
- ID = Intrinsic::x86_sse2_psrl_q;
- break;
- case X86::BI__builtin_ia32_psrlwi128:
- name = "psrlwi";
- ID = Intrinsic::x86_sse2_psrl_w;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, name);
- }
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
@@ -2106,66 +2278,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
- case X86::BI__builtin_ia32_pslldi:
- case X86::BI__builtin_ia32_psllqi:
- case X86::BI__builtin_ia32_psllwi:
- case X86::BI__builtin_ia32_psradi:
- case X86::BI__builtin_ia32_psrawi:
- case X86::BI__builtin_ia32_psrldi:
- case X86::BI__builtin_ia32_psrlqi:
- case X86::BI__builtin_ia32_psrlwi: {
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
- const char *name = 0;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported shift intrinsic!");
- case X86::BI__builtin_ia32_pslldi:
- name = "pslldi";
- ID = Intrinsic::x86_mmx_psll_d;
- break;
- case X86::BI__builtin_ia32_psllqi:
- name = "psllqi";
- ID = Intrinsic::x86_mmx_psll_q;
- break;
- case X86::BI__builtin_ia32_psllwi:
- name = "psllwi";
- ID = Intrinsic::x86_mmx_psll_w;
- break;
- case X86::BI__builtin_ia32_psradi:
- name = "psradi";
- ID = Intrinsic::x86_mmx_psra_d;
- break;
- case X86::BI__builtin_ia32_psrawi:
- name = "psrawi";
- ID = Intrinsic::x86_mmx_psra_w;
- break;
- case X86::BI__builtin_ia32_psrldi:
- name = "psrldi";
- ID = Intrinsic::x86_mmx_psrl_d;
- break;
- case X86::BI__builtin_ia32_psrlqi:
- name = "psrlqi";
- ID = Intrinsic::x86_mmx_psrl_q;
- break;
- case X86::BI__builtin_ia32_psrlwi:
- name = "psrlwi";
- ID = Intrinsic::x86_mmx_psrl_w;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, name);
- }
- case X86::BI__builtin_ia32_cmpps: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
- return Builder.CreateCall(F, Ops, "cmpps");
- }
- case X86::BI__builtin_ia32_cmpss: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
- return Builder.CreateCall(F, Ops, "cmpss");
- }
case X86::BI__builtin_ia32_ldmxcsr: {
llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
@@ -2182,14 +2294,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
- case X86::BI__builtin_ia32_cmppd: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
- return Builder.CreateCall(F, Ops, "cmppd");
- }
- case X86::BI__builtin_ia32_cmpsd: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
- return Builder.CreateCall(F, Ops, "cmpsd");
- }
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
@@ -2268,6 +2372,44 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
  // If palignr is shifting the pair of vectors by 32 bytes or more, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
+ case X86::BI__builtin_ia32_palignr256: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+    // If palignr is shifting the pair of input vectors by 16 bytes or less,
+    // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 32> Indices;
+      // 256-bit palignr operates on 128-bit lanes, so build the shuffle
+      // mask one lane at a time.
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned LaneStart = l * 16;
+ unsigned LaneEnd = (l+1) * 16;
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = shiftVal + i + LaneStart;
+ if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
+ }
+ }
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      // The intrinsic expects the shift amount in bits.
+      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors by 32 bytes or more,
+    // emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
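Because 256-bit palignr concatenates and shifts within each 128-bit lane rather than across the full register, the mask is built per lane, and any index that runs off the end of its lane is bumped by 16 so it selects the matching lane of the second shuffle operand. A runnable restatement of the index rule for shiftVal == 4:

    #include <cstdio>

    int main() {
      const unsigned shiftVal = 4;  // example byte shift
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16, LaneEnd = (l + 1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16;  // crosses into the second operand
          std::printf("%u ", Idx); // lane 0: 4..15 32..35, lane 1: 20..31 48..51
        }
      }
      return 0;
    }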
case X86::BI__builtin_ia32_movntps:
case X86::BI__builtin_ia32_movntpd:
case X86::BI__builtin_ia32_movntdq:
@@ -2285,140 +2427,2013 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return SI;
}
// 3DNow!
- case X86::BI__builtin_ia32_pavgusb:
- case X86::BI__builtin_ia32_pf2id:
- case X86::BI__builtin_ia32_pfacc:
- case X86::BI__builtin_ia32_pfadd:
- case X86::BI__builtin_ia32_pfcmpeq:
- case X86::BI__builtin_ia32_pfcmpge:
- case X86::BI__builtin_ia32_pfcmpgt:
- case X86::BI__builtin_ia32_pfmax:
- case X86::BI__builtin_ia32_pfmin:
- case X86::BI__builtin_ia32_pfmul:
- case X86::BI__builtin_ia32_pfrcp:
- case X86::BI__builtin_ia32_pfrcpit1:
- case X86::BI__builtin_ia32_pfrcpit2:
- case X86::BI__builtin_ia32_pfrsqrt:
- case X86::BI__builtin_ia32_pfrsqit1:
- case X86::BI__builtin_ia32_pfrsqrtit1:
- case X86::BI__builtin_ia32_pfsub:
- case X86::BI__builtin_ia32_pfsubr:
- case X86::BI__builtin_ia32_pi2fd:
- case X86::BI__builtin_ia32_pmulhrw:
- case X86::BI__builtin_ia32_pf2iw:
- case X86::BI__builtin_ia32_pfnacc:
- case X86::BI__builtin_ia32_pfpnacc:
- case X86::BI__builtin_ia32_pi2fw:
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch(BuiltinID) {
- case X86::BI__builtin_ia32_pavgusb:
- name = "pavgusb";
- ID = Intrinsic::x86_3dnow_pavgusb;
- break;
- case X86::BI__builtin_ia32_pf2id:
- name = "pf2id";
- ID = Intrinsic::x86_3dnow_pf2id;
- break;
- case X86::BI__builtin_ia32_pfacc:
- name = "pfacc";
- ID = Intrinsic::x86_3dnow_pfacc;
- break;
- case X86::BI__builtin_ia32_pfadd:
- name = "pfadd";
- ID = Intrinsic::x86_3dnow_pfadd;
- break;
- case X86::BI__builtin_ia32_pfcmpeq:
- name = "pfcmpeq";
- ID = Intrinsic::x86_3dnow_pfcmpeq;
- break;
- case X86::BI__builtin_ia32_pfcmpge:
- name = "pfcmpge";
- ID = Intrinsic::x86_3dnow_pfcmpge;
- break;
- case X86::BI__builtin_ia32_pfcmpgt:
- name = "pfcmpgt";
- ID = Intrinsic::x86_3dnow_pfcmpgt;
- break;
- case X86::BI__builtin_ia32_pfmax:
- name = "pfmax";
- ID = Intrinsic::x86_3dnow_pfmax;
- break;
- case X86::BI__builtin_ia32_pfmin:
- name = "pfmin";
- ID = Intrinsic::x86_3dnow_pfmin;
- break;
- case X86::BI__builtin_ia32_pfmul:
- name = "pfmul";
- ID = Intrinsic::x86_3dnow_pfmul;
- break;
- case X86::BI__builtin_ia32_pfrcp:
- name = "pfrcp";
- ID = Intrinsic::x86_3dnow_pfrcp;
- break;
- case X86::BI__builtin_ia32_pfrcpit1:
- name = "pfrcpit1";
- ID = Intrinsic::x86_3dnow_pfrcpit1;
- break;
- case X86::BI__builtin_ia32_pfrcpit2:
- name = "pfrcpit2";
- ID = Intrinsic::x86_3dnow_pfrcpit2;
- break;
- case X86::BI__builtin_ia32_pfrsqrt:
- name = "pfrsqrt";
- ID = Intrinsic::x86_3dnow_pfrsqrt;
- break;
- case X86::BI__builtin_ia32_pfrsqit1:
- case X86::BI__builtin_ia32_pfrsqrtit1:
- name = "pfrsqit1";
- ID = Intrinsic::x86_3dnow_pfrsqit1;
- break;
- case X86::BI__builtin_ia32_pfsub:
- name = "pfsub";
- ID = Intrinsic::x86_3dnow_pfsub;
- break;
- case X86::BI__builtin_ia32_pfsubr:
- name = "pfsubr";
- ID = Intrinsic::x86_3dnow_pfsubr;
- break;
- case X86::BI__builtin_ia32_pi2fd:
- name = "pi2fd";
- ID = Intrinsic::x86_3dnow_pi2fd;
- break;
- case X86::BI__builtin_ia32_pmulhrw:
- name = "pmulhrw";
- ID = Intrinsic::x86_3dnow_pmulhrw;
- break;
- case X86::BI__builtin_ia32_pf2iw:
- name = "pf2iw";
- ID = Intrinsic::x86_3dnowa_pf2iw;
- break;
- case X86::BI__builtin_ia32_pfnacc:
- name = "pfnacc";
- ID = Intrinsic::x86_3dnowa_pfnacc;
- break;
- case X86::BI__builtin_ia32_pfpnacc:
- name = "pfpnacc";
- ID = Intrinsic::x86_3dnowa_pfpnacc;
- break;
- case X86::BI__builtin_ia32_pi2fw:
- name = "pi2fw";
- ID = Intrinsic::x86_3dnowa_pi2fw;
- break;
+ default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi:
name = "pswapd";
ID = Intrinsic::x86_3dnowa_pswapd;
break;
}
+ llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
+ Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, name);
}
}
}
+
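The new EmitHexagonBuiltinExpr below is a flat translation table: each case only records the matching Intrinsic::ID. The shared tail that emits the call lies past the end of this excerpt; presumably it is the usual two-liner, sketched here using only the ID and Ops names declared below:

    // Assumed common tail (not shown in this hunk):
    //   llvm::Function *F = CGM.getIntrinsic(ID);
    //   return Builder.CreateCall(F, Ops);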
+Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
+ ID = Intrinsic::hexagon_C2_cmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
+ ID = Intrinsic::hexagon_C2_cmpgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
+ ID = Intrinsic::hexagon_C2_cmpgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
+ ID = Intrinsic::hexagon_C2_cmpeqp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
+ ID = Intrinsic::hexagon_C2_cmpgtp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
+ ID = Intrinsic::hexagon_C2_cmpgtup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
+ ID = Intrinsic::hexagon_C2_bitsset; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
+ ID = Intrinsic::hexagon_C2_bitsclr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
+ ID = Intrinsic::hexagon_C2_cmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
+ ID = Intrinsic::hexagon_C2_cmpgti; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
+ ID = Intrinsic::hexagon_C2_cmpgtui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
+ ID = Intrinsic::hexagon_C2_cmpgei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
+ ID = Intrinsic::hexagon_C2_cmpgeui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
+ ID = Intrinsic::hexagon_C2_cmplt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
+ ID = Intrinsic::hexagon_C2_cmpltu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
+ ID = Intrinsic::hexagon_C2_bitsclri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_and:
+ ID = Intrinsic::hexagon_C2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_or:
+ ID = Intrinsic::hexagon_C2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_xor:
+ ID = Intrinsic::hexagon_C2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_andn:
+ ID = Intrinsic::hexagon_C2_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_not:
+ ID = Intrinsic::hexagon_C2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_orn:
+ ID = Intrinsic::hexagon_C2_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
+ ID = Intrinsic::hexagon_C2_pxfer_map; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_any8:
+ ID = Intrinsic::hexagon_C2_any8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_all8:
+ ID = Intrinsic::hexagon_C2_all8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
+ ID = Intrinsic::hexagon_C2_vitpack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mux:
+ ID = Intrinsic::hexagon_C2_mux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxii:
+ ID = Intrinsic::hexagon_C2_muxii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxir:
+ ID = Intrinsic::hexagon_C2_muxir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxri:
+ ID = Intrinsic::hexagon_C2_muxri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vmux:
+ ID = Intrinsic::hexagon_C2_vmux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mask:
+ ID = Intrinsic::hexagon_C2_mask; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
+ ID = Intrinsic::hexagon_A2_vcmpbeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
+ ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
+ ID = Intrinsic::hexagon_A2_vcmpheq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
+ ID = Intrinsic::hexagon_A2_vcmphgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
+ ID = Intrinsic::hexagon_A2_vcmphgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
+ ID = Intrinsic::hexagon_A2_vcmpweq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
+ ID = Intrinsic::hexagon_A2_vcmpwgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
+ ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
+ ID = Intrinsic::hexagon_C2_tfrpr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
+ ID = Intrinsic::hexagon_C2_tfrrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
+ ID = Intrinsic::hexagon_M2_mpysmi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsip:
+ ID = Intrinsic::hexagon_M2_macsip; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsin:
+ ID = Intrinsic::hexagon_M2_macsin; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
+ ID = Intrinsic::hexagon_M2_mpy_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
+ ID = Intrinsic::hexagon_M2_mpyu_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
+ ID = Intrinsic::hexagon_M2_mpyi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
+ ID = Intrinsic::hexagon_M2_mpyui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_maci:
+ ID = Intrinsic::hexagon_M2_maci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_acci:
+ ID = Intrinsic::hexagon_M2_acci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_accii:
+ ID = Intrinsic::hexagon_M2_accii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_nacci:
+ ID = Intrinsic::hexagon_M2_nacci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_naccii:
+ ID = Intrinsic::hexagon_M2_naccii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_subacc:
+ ID = Intrinsic::hexagon_M2_subacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
+ ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
+ ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
+ ID = Intrinsic::hexagon_M2_vmac2; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
+ ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
+ ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
+ ID = Intrinsic::hexagon_M2_vmac2es; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
+ ID = Intrinsic::hexagon_M2_vrmac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
+ ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
+ ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
+ ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
+ ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
+ ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
+ ID = Intrinsic::hexagon_M2_cmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
+ ID = Intrinsic::hexagon_M2_cmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
+ ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
+ ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
+ ID = Intrinsic::hexagon_M2_cmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
+ ID = Intrinsic::hexagon_M2_cmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
+ ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
+ ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
+ ID = Intrinsic::hexagon_M2_cnacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
+ ID = Intrinsic::hexagon_M2_cnacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
+ ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
+ ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
+ ID = Intrinsic::hexagon_M2_mmacls_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
+ ID = Intrinsic::hexagon_M2_mmacls_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
+ ID = Intrinsic::hexagon_M2_mmachs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
+ ID = Intrinsic::hexagon_M2_mmachs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
+ ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
+ ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
+ ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
+ ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
+ ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
+ ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
+ ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
+ ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
+ ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
+ ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
+ ID = Intrinsic::hexagon_M2_cmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
+ ID = Intrinsic::hexagon_M2_cmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
+ ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
+ ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
+ ID = Intrinsic::hexagon_S2_vcrotate; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_add:
+ ID = Intrinsic::hexagon_A2_add; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sub:
+ ID = Intrinsic::hexagon_A2_sub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsat:
+ ID = Intrinsic::hexagon_A2_addsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subsat:
+ ID = Intrinsic::hexagon_A2_subsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addi:
+ ID = Intrinsic::hexagon_A2_addi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_aslh:
+ ID = Intrinsic::hexagon_A2_aslh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_asrh:
+ ID = Intrinsic::hexagon_A2_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addp:
+ ID = Intrinsic::hexagon_A2_addp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
+ ID = Intrinsic::hexagon_A2_addpsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsp:
+ ID = Intrinsic::hexagon_A2_addsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subp:
+ ID = Intrinsic::hexagon_A2_subp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_neg:
+ ID = Intrinsic::hexagon_A2_neg; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negsat:
+ ID = Intrinsic::hexagon_A2_negsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abs:
+ ID = Intrinsic::hexagon_A2_abs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abssat:
+ ID = Intrinsic::hexagon_A2_abssat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vconj:
+ ID = Intrinsic::hexagon_A2_vconj; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negp:
+ ID = Intrinsic::hexagon_A2_negp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_absp:
+ ID = Intrinsic::hexagon_A2_absp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_max:
+ ID = Intrinsic::hexagon_A2_max; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxu:
+ ID = Intrinsic::hexagon_A2_maxu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_min:
+ ID = Intrinsic::hexagon_A2_min; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minu:
+ ID = Intrinsic::hexagon_A2_minu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxp:
+ ID = Intrinsic::hexagon_A2_maxp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxup:
+ ID = Intrinsic::hexagon_A2_maxup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minp:
+ ID = Intrinsic::hexagon_A2_minp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minup:
+ ID = Intrinsic::hexagon_A2_minup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfr:
+ ID = Intrinsic::hexagon_A2_tfr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
+ ID = Intrinsic::hexagon_A2_tfrsi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
+ ID = Intrinsic::hexagon_A2_tfrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
+ ID = Intrinsic::hexagon_A2_tfrpi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
+ ID = Intrinsic::hexagon_A2_zxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
+ ID = Intrinsic::hexagon_A2_sxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxth:
+ ID = Intrinsic::hexagon_A2_zxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxth:
+ ID = Intrinsic::hexagon_A2_sxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combinew:
+ ID = Intrinsic::hexagon_A2_combinew; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combineii:
+ ID = Intrinsic::hexagon_A2_combineii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
+ ID = Intrinsic::hexagon_A2_combine_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
+ ID = Intrinsic::hexagon_A2_combine_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
+ ID = Intrinsic::hexagon_A2_combine_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
+ ID = Intrinsic::hexagon_A2_combine_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfril:
+ ID = Intrinsic::hexagon_A2_tfril; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
+ ID = Intrinsic::hexagon_A2_tfrih; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_and:
+ ID = Intrinsic::hexagon_A2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_or:
+ ID = Intrinsic::hexagon_A2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xor:
+ ID = Intrinsic::hexagon_A2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_not:
+ ID = Intrinsic::hexagon_A2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
+ ID = Intrinsic::hexagon_M2_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subri:
+ ID = Intrinsic::hexagon_A2_subri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andir:
+ ID = Intrinsic::hexagon_A2_andir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orir:
+ ID = Intrinsic::hexagon_A2_orir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andp:
+ ID = Intrinsic::hexagon_A2_andp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orp:
+ ID = Intrinsic::hexagon_A2_orp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xorp:
+ ID = Intrinsic::hexagon_A2_xorp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_notp:
+ ID = Intrinsic::hexagon_A2_notp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
+ ID = Intrinsic::hexagon_A2_sxtw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sat:
+ ID = Intrinsic::hexagon_A2_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sath:
+ ID = Intrinsic::hexagon_A2_sath; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satuh:
+ ID = Intrinsic::hexagon_A2_satuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satub:
+ ID = Intrinsic::hexagon_A2_satub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satb:
+ ID = Intrinsic::hexagon_A2_satb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
+ ID = Intrinsic::hexagon_A2_vaddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
+ ID = Intrinsic::hexagon_A2_vaddubs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
+ ID = Intrinsic::hexagon_A2_vaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
+ ID = Intrinsic::hexagon_A2_vaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
+ ID = Intrinsic::hexagon_A2_vadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
+ ID = Intrinsic::hexagon_A2_vaddw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
+ ID = Intrinsic::hexagon_A2_vaddws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
+ ID = Intrinsic::hexagon_A2_svavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
+ ID = Intrinsic::hexagon_A2_svavghs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
+ ID = Intrinsic::hexagon_A2_svnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
+ ID = Intrinsic::hexagon_A2_svaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
+ ID = Intrinsic::hexagon_A2_svaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
+ ID = Intrinsic::hexagon_A2_svadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
+ ID = Intrinsic::hexagon_A2_svsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
+ ID = Intrinsic::hexagon_A2_svsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
+ ID = Intrinsic::hexagon_A2_svsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
+ ID = Intrinsic::hexagon_A2_vraddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
+ ID = Intrinsic::hexagon_A2_vraddub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
+ ID = Intrinsic::hexagon_M2_vradduh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
+ ID = Intrinsic::hexagon_A2_vsubub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
+ ID = Intrinsic::hexagon_A2_vsububs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
+ ID = Intrinsic::hexagon_A2_vsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
+ ID = Intrinsic::hexagon_A2_vsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
+ ID = Intrinsic::hexagon_A2_vsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
+ ID = Intrinsic::hexagon_A2_vsubw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
+ ID = Intrinsic::hexagon_A2_vsubws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
+ ID = Intrinsic::hexagon_A2_vabsh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
+ ID = Intrinsic::hexagon_A2_vabshsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
+ ID = Intrinsic::hexagon_A2_vabsw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
+ ID = Intrinsic::hexagon_A2_vabswsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
+ ID = Intrinsic::hexagon_M2_vabsdiffw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
+ ID = Intrinsic::hexagon_M2_vabsdiffh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
+ ID = Intrinsic::hexagon_A2_vrsadub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
+ ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
+ ID = Intrinsic::hexagon_A2_vavgub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
+ ID = Intrinsic::hexagon_A2_vavguh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
+ ID = Intrinsic::hexagon_A2_vavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
+ ID = Intrinsic::hexagon_A2_vnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
+ ID = Intrinsic::hexagon_A2_vavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
+ ID = Intrinsic::hexagon_A2_vnavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
+ ID = Intrinsic::hexagon_A2_vavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
+ ID = Intrinsic::hexagon_A2_vnavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
+ ID = Intrinsic::hexagon_A2_vavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
+ ID = Intrinsic::hexagon_A2_vnavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
+ ID = Intrinsic::hexagon_A2_vavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
+ ID = Intrinsic::hexagon_A2_vnavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
+ ID = Intrinsic::hexagon_A2_vavguw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
+ ID = Intrinsic::hexagon_A2_vavguwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
+ ID = Intrinsic::hexagon_A2_vavgubr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
+ ID = Intrinsic::hexagon_A2_vavguhr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
+ ID = Intrinsic::hexagon_A2_vavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
+ ID = Intrinsic::hexagon_A2_vnavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminh:
+ ID = Intrinsic::hexagon_A2_vminh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
+ ID = Intrinsic::hexagon_A2_vmaxh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminub:
+ ID = Intrinsic::hexagon_A2_vminub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
+ ID = Intrinsic::hexagon_A2_vmaxub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
+ ID = Intrinsic::hexagon_A2_vminuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
+ ID = Intrinsic::hexagon_A2_vmaxuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminw:
+ ID = Intrinsic::hexagon_A2_vminw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
+ ID = Intrinsic::hexagon_A2_vmaxw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
+ ID = Intrinsic::hexagon_A2_vminuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
+ ID = Intrinsic::hexagon_A2_vmaxuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
+ ID = Intrinsic::hexagon_S2_asr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
+ ID = Intrinsic::hexagon_S2_asl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
+ ID = Intrinsic::hexagon_S2_lsr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
+ ID = Intrinsic::hexagon_S2_lsl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
+ ID = Intrinsic::hexagon_S2_asr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
+ ID = Intrinsic::hexagon_S2_asl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
+ ID = Intrinsic::hexagon_S2_lsr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
+ ID = Intrinsic::hexagon_S2_lsl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
+ ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
+ ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
+ ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
+ ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
+ ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
+ ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
+ ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
+ ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
+ ID = Intrinsic::hexagon_S2_asr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
+ ID = Intrinsic::hexagon_S2_lsr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
+ ID = Intrinsic::hexagon_S2_asl_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
+ ID = Intrinsic::hexagon_S2_asr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
+ ID = Intrinsic::hexagon_S2_lsr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
+ ID = Intrinsic::hexagon_S2_asl_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
+ ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
+ ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
+ ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
+ ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
+ ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
+ ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
+ ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
+ ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
+ ID = Intrinsic::hexagon_S2_addasl_rrri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignib:
+ ID = Intrinsic::hexagon_S2_valignib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
+ ID = Intrinsic::hexagon_S2_valignrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
+ ID = Intrinsic::hexagon_S2_vspliceib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
+ ID = Intrinsic::hexagon_S2_vsplicerb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
+ ID = Intrinsic::hexagon_S2_vsplatrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
+ ID = Intrinsic::hexagon_S2_vsplatrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert:
+ ID = Intrinsic::hexagon_S2_insert; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu:
+ ID = Intrinsic::hexagon_S2_extractu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp:
+ ID = Intrinsic::hexagon_S2_insertp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup:
+ ID = Intrinsic::hexagon_S2_extractup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
+ ID = Intrinsic::hexagon_S2_insert_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
+ ID = Intrinsic::hexagon_S2_extractu_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
+ ID = Intrinsic::hexagon_S2_insertp_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
+ ID = Intrinsic::hexagon_S2_extractup_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
+ ID = Intrinsic::hexagon_S2_tstbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
+ ID = Intrinsic::hexagon_S2_setbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
+ ID = Intrinsic::hexagon_S2_togglebit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
+ ID = Intrinsic::hexagon_S2_clrbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
+ ID = Intrinsic::hexagon_S2_tstbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
+ ID = Intrinsic::hexagon_S2_setbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
+ ID = Intrinsic::hexagon_S2_togglebit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
+ ID = Intrinsic::hexagon_S2_clrbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
+ ID = Intrinsic::hexagon_S2_asr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
+ ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
+ ID = Intrinsic::hexagon_S2_asl_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
+ ID = Intrinsic::hexagon_S2_asr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
+ ID = Intrinsic::hexagon_S2_asl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
+ ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
+ ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
+ ID = Intrinsic::hexagon_S2_asr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
+ ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
+ ID = Intrinsic::hexagon_S2_asl_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
+ ID = Intrinsic::hexagon_S2_asr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
+ ID = Intrinsic::hexagon_S2_asl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
+ ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
+ ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
+ ID = Intrinsic::hexagon_S2_vrndpackwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
+ ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
+ ID = Intrinsic::hexagon_S2_vsxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
+ ID = Intrinsic::hexagon_S2_vzxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
+ ID = Intrinsic::hexagon_S2_vsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
+ ID = Intrinsic::hexagon_S2_svsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
+ ID = Intrinsic::hexagon_S2_svsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
+ ID = Intrinsic::hexagon_S2_vsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
+ ID = Intrinsic::hexagon_S2_vtrunohb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
+ ID = Intrinsic::hexagon_S2_vtrunewh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
+ ID = Intrinsic::hexagon_S2_vtrunowh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
+ ID = Intrinsic::hexagon_S2_vtrunehb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
+ ID = Intrinsic::hexagon_S2_vsxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
+ ID = Intrinsic::hexagon_S2_vzxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
+ ID = Intrinsic::hexagon_S2_vsatwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
+ ID = Intrinsic::hexagon_S2_vsatwuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_packhl:
+ ID = Intrinsic::hexagon_S2_packhl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_swiz:
+ ID = Intrinsic::hexagon_A2_swiz; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
+ ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
+ ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
+ ID = Intrinsic::hexagon_S2_shuffob; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
+ ID = Intrinsic::hexagon_S2_shuffeb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
+ ID = Intrinsic::hexagon_S2_shuffoh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
+ ID = Intrinsic::hexagon_S2_shuffeh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_parityp:
+ ID = Intrinsic::hexagon_S2_parityp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
+ ID = Intrinsic::hexagon_S2_lfsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
+ ID = Intrinsic::hexagon_S2_clbnorm; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clb:
+ ID = Intrinsic::hexagon_S2_clb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0:
+ ID = Intrinsic::hexagon_S2_cl0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1:
+ ID = Intrinsic::hexagon_S2_cl1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbp:
+ ID = Intrinsic::hexagon_S2_clbp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
+ ID = Intrinsic::hexagon_S2_cl0p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
+ ID = Intrinsic::hexagon_S2_cl1p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_brev:
+ ID = Intrinsic::hexagon_S2_brev; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct0:
+ ID = Intrinsic::hexagon_S2_ct0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct1:
+ ID = Intrinsic::hexagon_S2_ct1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_interleave:
+ ID = Intrinsic::hexagon_S2_interleave; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
+ ID = Intrinsic::hexagon_S2_deinterleave; break;
+
+ case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
+ ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_orn:
+ ID = Intrinsic::hexagon_A4_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andn:
+ ID = Intrinsic::hexagon_A4_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_ornp:
+ ID = Intrinsic::hexagon_A4_ornp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andnp:
+ ID = Intrinsic::hexagon_A4_andnp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineir:
+ ID = Intrinsic::hexagon_A4_combineir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineri:
+ ID = Intrinsic::hexagon_A4_combineri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
+ ID = Intrinsic::hexagon_C4_cmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
+ ID = Intrinsic::hexagon_C4_cmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
+ ID = Intrinsic::hexagon_C4_cmpltei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
+ ID = Intrinsic::hexagon_C4_cmplte; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
+ ID = Intrinsic::hexagon_C4_cmplteui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
+ ID = Intrinsic::hexagon_C4_cmplteu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
+ ID = Intrinsic::hexagon_A4_rcmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
+ ID = Intrinsic::hexagon_A4_rcmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
+ ID = Intrinsic::hexagon_A4_rcmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
+ ID = Intrinsic::hexagon_A4_rcmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
+ ID = Intrinsic::hexagon_C4_fastcorner9; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
+ ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
+ ID = Intrinsic::hexagon_C4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_and:
+ ID = Intrinsic::hexagon_C4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
+ ID = Intrinsic::hexagon_C4_and_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_or:
+ ID = Intrinsic::hexagon_C4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
+ ID = Intrinsic::hexagon_C4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_and:
+ ID = Intrinsic::hexagon_C4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
+ ID = Intrinsic::hexagon_C4_or_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_or:
+ ID = Intrinsic::hexagon_C4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
+ ID = Intrinsic::hexagon_S4_addaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
+ ID = Intrinsic::hexagon_S4_subaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
+ ID = Intrinsic::hexagon_M4_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_and:
+ ID = Intrinsic::hexagon_M4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_or:
+ ID = Intrinsic::hexagon_M4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
+ ID = Intrinsic::hexagon_M4_and_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
+ ID = Intrinsic::hexagon_M4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
+ ID = Intrinsic::hexagon_M4_xor_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
+ ID = Intrinsic::hexagon_M4_xor_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
+ ID = Intrinsic::hexagon_M4_xor_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_and:
+ ID = Intrinsic::hexagon_M4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_or:
+ ID = Intrinsic::hexagon_M4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
+ ID = Intrinsic::hexagon_M4_or_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
+ ID = Intrinsic::hexagon_M4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
+ ID = Intrinsic::hexagon_S4_or_andix; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
+ ID = Intrinsic::hexagon_S4_or_andi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
+ ID = Intrinsic::hexagon_S4_or_ori; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
+ ID = Intrinsic::hexagon_A4_modwrapu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
+ ID = Intrinsic::hexagon_A4_cround_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
+ ID = Intrinsic::hexagon_A4_round_ri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
+ ID = Intrinsic::hexagon_A4_round_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
+ ID = Intrinsic::hexagon_A4_round_ri_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
+ ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+
+ }
+
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+}
+
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
@@ -2506,5 +4521,4 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops, "");
}
}
- return 0;
}
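
The Hexagon handler above reduces to one pattern: map the frontend builtin ID onto the identically named LLVM intrinsic, then emit a direct call. A minimal sketch with the table trimmed to a single entry; Ops, Builder, and CGM are the enclosing function's state, exactly as in the hunk above:

    // Sketch, not the full table: every case follows the same
    // BI__builtin_HEXAGON_* -> Intrinsic::hexagon_* renaming.
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: return 0;                        // not a Hexagon builtin
    case Hexagon::BI__builtin_HEXAGON_A2_add:
      ID = Intrinsic::hexagon_A2_add; break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID); // look up the declared intrinsic
    return Builder.CreateCall(F, Ops, "");    // Ops holds the emitted arguments
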
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index b5e6e0d7d993..7c08650d278e 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -196,7 +196,8 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
GlobalDecl(ctor, Ctor_Base)))
return;
- const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(ctor, ctorType);
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
@@ -218,11 +219,10 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
- if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(ctor, ctorType);
+ if (!fnInfo)
+ fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
- const FunctionProtoType *proto = ctor->getType()->castAs<FunctionProtoType>();
- llvm::FunctionType *fnType =
- getTypes().GetFunctionType(*fnInfo, proto->isVariadic());
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -260,7 +260,8 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
return;
- const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(dtor, dtorType);
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXDestructor(dtor, dtorType);
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
@@ -282,11 +283,9 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
- if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(dtor, dtorType);
-
- llvm::FunctionType *fnType =
- getTypes().GetFunctionType(*fnInfo, false);
+ if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -359,12 +358,10 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
// -O does that. But need to support -O0 as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
- const CGFunctionInfo *FInfo =
- &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+ const CGFunctionInfo &FInfo =
+ CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
Ty = Ty->getPointerTo()->getPointerTo();
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 248448ccdc2e..befebbecbddb 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -49,9 +49,8 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
return llvm::Constant::getNullValue(FTy->getPointerTo());
}
@@ -71,6 +70,11 @@ llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
return GetBogusMemberPointer(CGM, E->getType());
}
+llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
llvm::Value *
CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -90,11 +94,6 @@ CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
}
llvm::Constant *
-CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
- return GetBogusMemberPointer(CGM, E->getType());
-}
-
-llvm::Constant *
CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
return GetBogusMemberPointer(CGM, QualType(MPT, 0));
}
@@ -110,6 +109,10 @@ llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
return GetBogusMemberPointer(CGM, QualType(MPT, 0));
}
+llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ return GetBogusMemberPointer(CGM, MPT);
+}
+
bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Fake answer.
return true;
@@ -169,6 +172,28 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
const VarDecl &D,
- llvm::GlobalVariable *GV) {
+ llvm::GlobalVariable *GV,
+ bool PerformInit) {
ErrorUnsupportedABI(CGF, "static local variable initialization");
}
+
+/// Returns the adjustment, in bytes, required for the given
+/// member-pointer operation. Returns null if no adjustment is
+/// required.
+llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ QualType derivedType;
+ if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
+ derivedType = E->getSubExpr()->getType();
+ else
+ derivedType = E->getType();
+
+ const CXXRecordDecl *derivedClass =
+ derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+
+ return CGM.GetNonVirtualBaseClassOffset(derivedClass,
+ E->path_begin(),
+ E->path_end());
+}
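
The adjustment returned by getMemberPointerAdjustment is the non-virtual offset of the base subobject within the derived class, applied whenever a member pointer crosses a derived/base boundary. A short C++ illustration (class names invented for the example):

    struct A { int a; };
    struct B { int b; };
    struct C : A, B { int c; };

    int B::*pb = &B::b;
    // Base-to-derived member-pointer conversion: the stored field offset
    // must be shifted by B's offset inside C, which is exactly the
    // (non-null) constant getMemberPointerAdjustment computes.
    int C::*pc = pb;
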
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index c2abf358329c..4e045f5f3275 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -42,17 +42,17 @@ namespace CodeGen {
class CGCXXABI {
protected:
CodeGenModule &CGM;
- llvm::OwningPtr<MangleContext> MangleCtx;
+ OwningPtr<MangleContext> MangleCtx;
CGCXXABI(CodeGenModule &CGM)
: CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
- return CGF.CXXThisDecl;
+ return CGF.CXXABIThisDecl;
}
llvm::Value *&getThisValue(CodeGenFunction &CGF) {
- return CGF.CXXThisValue;
+ return CGF.CXXABIThisValue;
}
ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
@@ -100,16 +100,16 @@ public:
llvm::Value *MemPtr,
const MemberPointerType *MPT);
- /// Perform a derived-to-base or base-to-derived member pointer
- /// conversion.
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion.
virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src);
- /// Perform a derived-to-base or base-to-derived member pointer
- /// conversion on a constant member pointer.
- virtual llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E);
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion on a constant value.
+ virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
/// Return true if the given member pointer can be zero-initialized
/// (in the C++ sense) with an LLVM zeroinitializer.
@@ -125,6 +125,9 @@ public:
virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset);
+ /// Create a member pointer for the given member pointer constant.
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
/// Emit a comparison between two member pointers. Returns an i1.
virtual llvm::Value *
EmitMemberPointerComparison(CodeGenFunction &CGF,
@@ -139,6 +142,15 @@ public:
llvm::Value *MemPtr,
const MemberPointerType *MPT);
+protected:
+ /// A utility method for computing the offset required for the given
+ /// base-to-derived or derived-to-base member-pointer conversion.
+ /// Does not handle virtual conversions (in case we ever fully
+ /// support an ABI that allows this). Returns null if no adjustment
+ /// is required.
+ llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+
+public:
/// Build the signature of the given constructor variant by adding
/// any required parameters. For convenience, ResTy has been
/// initialized to 'void', and ArgTys has been initialized with the
@@ -228,12 +240,14 @@ public:
/// Emits the guarded initializer and destructor setup for the given
/// variable, given that it couldn't be emitted as a constant.
+ /// If \p PerformInit is false, the initialization has been folded to a
+ /// constant and should not be performed.
///
/// The variable may be:
/// - a static local variable
/// - a static data member of a class template instantiation
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::GlobalVariable *DeclPtr);
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
};
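
The new PerformInit flag lets callers tell the ABI that the variable's initializer was already folded to a constant, so only the guard bookkeeping remains. One plausible case, as suggested by the doc comment (hypothetical code, not from the patch):

    struct Logger { ~Logger(); };        // non-trivial destructor

    Logger &get() {
      // If the initializer folds to a constant, the stores can be
      // omitted, but the destructor still has to be registered exactly
      // once on first use: the guard is emitted with PerformInit == false.
      static Logger instance = {};
      return instance;
    }
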
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 6ae2d0c96775..4455f1a0865b 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -17,6 +17,7 @@
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -66,29 +67,39 @@ static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
+/// Arrange the argument and result information for a value of the
+/// given unprototyped function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
- return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
- SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo());
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
}
-/// \param Args - contains any initial parameters besides those
-/// in the formal type
-static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &ArgTys,
+/// Arrange the argument and result information for a value of the
+/// given function type, on top of any implicit parameters already
+/// stored.
+static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &argTypes,
CanQual<FunctionProtoType> FTP) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ argTypes.push_back(FTP->getArgType(i));
+ CanQualType resultType = FTP->getResultType().getUnqualifiedType();
+ return CGT.arrangeFunctionType(resultType, argTypes,
+ FTP->getExtInfo(), required);
}
+/// Arrange the argument and result information for a value of the
+/// given function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
- SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP);
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeFunctionType(*this, argTypes, FTP);
}
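
The static helper above arranges a prototype on top of argument types already pushed by the caller, and forPrototypePlus folds those extra leading arguments into the required-argument count. A small invented illustration of what that means for a member function:

    struct Widget {
      int resize(int w, ...);            // variadic member function
    };
    // Arranged as:  int(Widget *this, int w, ...)
    // The caller first pushes 'this', then uses
    // RequiredArgs::forPrototypePlus(FTP, /*Additional=*/1), giving two
    // required arguments: the implicit 'this' plus the prototyped 'w'.
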
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -111,162 +122,250 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
return CC_C;
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP) {
- SmallVector<CanQualType, 16> ArgTys;
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- ArgTys.push_back(GetThisType(Context, RD));
+ argTypes.push_back(GetThisType(Context, RD));
- return ::getFunctionInfo(*this, ArgTys,
+ return ::arrangeFunctionType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
-
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
- // Add the 'this' pointer unless this is a static method.
- if (MD->isInstance())
- ArgTys.push_back(GetThisType(Context, MD->getParent()));
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ }
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
+ return arrangeFunctionType(prototype);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
- CXXCtorType Type) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration
+/// or definition of the given constructor variant.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
+ CXXCtorType ctorKind) {
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
+ argTypes.push_back(FTP->getArgType(i));
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
- CXXDtorType Type) {
- SmallVector<CanQualType, 2> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration,
+/// definition, or call to the given destructor variant. It so
+/// happens that all three cases produce the same information.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType dtorKind) {
+ SmallVector<CanQualType, 2> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
+ RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
- return getFunctionInfo(MD);
+ return arrangeCXXMethodDeclaration(MD);
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
assert(isa<FunctionType>(FTy));
- if (isa<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeFunctionType(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
+ }
+
assert(isa<FunctionProtoType>(FTy));
- return getFunctionInfo(FTy.getAs<FunctionProtoType>());
+ return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
- ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
e = MD->param_end(); i != e; ++i) {
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
FunctionType::ExtInfo einfo;
einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
MD->hasAttr<NSReturnsRetainedAttr>())
einfo = einfo.withProducesResult(true);
- return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return getFunctionInfo(CD, GD.getCtorType());
+ return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return getFunctionInfo(DD, GD.getDtorType());
+ return arrangeCXXDestructor(DD, GD.getDtorType());
+
+ return arrangeFunctionDeclaration(FD);
+}
- return getFunctionInfo(FD);
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ RequiredArgs required = RequiredArgs::All;
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumArgs());
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(0);
+ }
+
+ return arrangeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
}
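
For illustration, the dispatch above boils down to the following stand-alone sketch; the boolean parameter stands in for the target hook isNoProtoCallVariadic, and the helper name is hypothetical:

static RequiredArgs computeRequired(const FunctionType *fnType,
                                    bool noProtoCallIsVariadic) {
  // Prototyped functions: everything before the ellipsis is required.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType))
    return proto->isVariadic() ? RequiredArgs(proto->getNumArgs())
                               : RequiredArgs::All;
  // Unprototyped (K&R) functions are normally treated as non-variadic,
  // unless the target ABI insists on the variadic convention for them.
  return noProtoCallIsVariadic ? RequiredArgs(0) : RequiredArgs::All;
}
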
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const CallArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
+ const FunctionArgList &args,
+ const FunctionType::ExtInfo &info,
+ bool isVariadic) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
- SmallVector<CanQualType, 1> args;
- return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
- const SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info) {
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
#ifndef NDEBUG
- for (SmallVectorImpl<CanQualType>::const_iterator
- I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ for (ArrayRef<CanQualType>::const_iterator
+ I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
assert(I->isCanonicalAsParam());
#endif
- unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
- void *InsertPos = 0;
- CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ void *insertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
if (FI)
return *FI;
- // Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
- Info.getHasRegParm(), Info.getRegParm(), ResTy,
- ArgTys.data(), ArgTys.size());
- FunctionInfos.InsertNode(FI, InsertPos);
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
- bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
- assert(Inserted && "Recursively being processed?");
+ bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ assert(inserted && "Recursively being processed?");
// Compute ABI information.
getABIInfo().computeInfo(*FI);
@@ -274,39 +373,42 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
- ABIArgInfo &RetInfo = FI->getReturnInfo();
- if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
- RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
I->info.setCoerceToType(ConvertType(I->type));
- bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
- assert(Erased && "Not in set?");
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
return *FI;
}
-CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool returnsRetained,
- bool _HasRegParm, unsigned _RegParm,
- CanQualType ResTy,
- const CanQualType *ArgTys,
- unsigned NumArgTys)
- : CallingConvention(_CallingConvention),
- EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
- HasRegParm(_HasRegParm), RegParm(_RegParm)
-{
- NumArgs = NumArgTys;
-
- // FIXME: Coallocate with the CGFunctionInfo object.
- Args = new ArgInfo[1 + NumArgTys];
- Args[0].type = ResTy;
- for (unsigned i = 0; i != NumArgTys; ++i)
- Args[1 + i].type = ArgTys[i];
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
}
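
create() realizes the old "FIXME: Coallocate" by the trailing-buffer idiom: one allocation holds the CGFunctionInfo header and the ArgInfo array that getArgsBuffer() reaches with 'this + 1'. A toy reduction of the same idiom, assuming <new> is available (illustrative only):

struct Packet {
  unsigned NumElts;
  int *getBuffer() { return reinterpret_cast<int *>(this + 1); }

  static Packet *create(unsigned n) {
    // One allocation: the header, immediately followed by n payload slots.
    void *mem = operator new(sizeof(Packet) + n * sizeof(int));
    Packet *P = new (mem) Packet();
    P->NumElts = n;
    return P;
  }
};
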
/***/
@@ -366,7 +468,7 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
QualType EltTy = CT->getElementType();
llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
- llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 0, "imag");
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
} else {
EmitStoreThroughLValue(RValue::get(AI), LV);
@@ -521,7 +623,9 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
SI->setAlignment(1);
}
} else {
- CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
}
}
@@ -611,20 +715,24 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
return false;
}
-llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
- const CGFunctionInfo &FI = getFunctionInfo(GD);
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ }
+ }
- // For definition purposes, don't consider a K&R function variadic.
- bool Variadic = false;
- if (const FunctionProtoType *FPT =
- cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
- Variadic = FPT->isVariadic();
+ return false;
+}
- return GetFunctionType(FI, Variadic);
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
}
llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
assert(Inserted && "Recursively being processed?");
@@ -675,6 +783,9 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -697,7 +808,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
- return llvm::FunctionType::get(resultType, argTypes, isVariadic);
+ return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
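
Condensed, the padding handling above amounts to emitting a dummy IR parameter ahead of the real one whenever the ABI supplies a PaddingType; call sites fill that slot with undef and the prolog steps over it. A sketch that ignores the first-class-aggregate flattening (helper name hypothetical):

static void pushPaddedArg(SmallVectorImpl<llvm::Type *> &argTypes,
                          const ABIArgInfo &argAI) {
  // The dummy slot comes first so the real argument lands at the
  // alignment the target asked for.
  if (llvm::Type *PaddingType = argAI.getPaddingType())
    argTypes.push_back(PaddingType);
  argTypes.push_back(argAI.getCoerceToType());
}
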
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -709,18 +820,18 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
else
- Info = &getFunctionInfo(MD);
- return GetFunctionType(*Info, FPT->isVariadic());
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv) {
- unsigned FuncAttrs = 0;
- unsigned RetAttrs = 0;
+ llvm::Attributes FuncAttrs;
+ llvm::Attributes RetAttrs;
CallingConv = FI.getEffectiveCallingConvention();
@@ -806,7 +917,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
- unsigned Attributes = 0;
+ llvm::Attributes Attrs;
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -814,20 +925,24 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::SExt;
+ Attrs |= llvm::Attribute::SExt;
else if (ParamType->isUnsignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::ZExt;
+ Attrs |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ (ParamType->isIntegerType() || ParamType->isPointerType() ||
+ ParamType->isReferenceType())) {
RegParm -=
(Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
if (RegParm >= 0)
- Attributes |= llvm::Attribute::InReg;
+ Attrs |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
+ // Increment Index if there is padding.
+ Index += (AI.getPaddingType() != 0);
+
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
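
As a worked example of the accounting above, take an i386-style target (PointerWidth = 32) with regparm(3): an i64 parameter costs (64 + 31) / 32 = 2 registers, so RegParm drops from 3 to 1 and the argument is marked inreg; a following pointer costs one more, and since RegParm is then 0, which still satisfies RegParm >= 0, it is marked inreg as well.
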
@@ -835,9 +950,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
- Attributes |= llvm::Attribute::ByVal;
+ Attrs |= llvm::Attribute::ByVal;
- Attributes |=
+ Attrs |=
llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
@@ -859,8 +974,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
}
- if (Attributes)
- PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ if (Attrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
++Index;
}
if (FuncAttrs)
@@ -970,6 +1085,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
@@ -991,7 +1110,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
@@ -1015,15 +1134,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (STy && STy->getNumElements() > 1) {
+ uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ if (SrcSize <= DstSize) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ llvm::AllocaInst *TempAlloca =
+ CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
+ TempAlloca->setAlignment(AlignmentToUse);
+ llvm::Value *TempV = TempAlloca;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
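
The SrcSize/DstSize test above picks between two ways of storing the flattened pieces: straight into the parameter's alloca when they fit, or into a temporary that is copied back only up to DstSize bytes. A host-side analogue of that policy, assuming <cstring> and <cassert> (sketch only):

static void storeCoerced(char *dst, size_t dstSize,
                         const char *src, size_t srcSize) {
  if (srcSize <= dstSize) {
    // The pieces fit: write them into the destination directly.
    std::memcpy(dst, src, srcSize);
  } else {
    // The coerced form is wider than the destination: stage it in a
    // scratch buffer, then copy only the bytes the destination holds.
    char tmp[32];
    assert(srcSize <= sizeof(tmp) && "sketch handles small coercions only");
    std::memcpy(tmp, src, srcSize);
    std::memcpy(dst, tmp, dstSize);
  }
}
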
@@ -1047,10 +1187,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
- EmitParmDecl(*Arg, Temp, ArgNo);
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
+ CharUnits Align = getContext().getDeclAlign(Arg);
+ Alloca->setAlignment(Align.getQuantity());
+ LValue LV = MakeAddrLValue(Alloca, Ty, Align);
+ llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
+ EmitParmDecl(*Arg, Alloca, ArgNo);
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
@@ -1076,6 +1218,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
@@ -1154,9 +1307,54 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, resultType);
}
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return 0;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return 0;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ return 0;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ return 0;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
+
/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
// At -O0, try to emit a fused retain/autorelease.
if (CGF.shouldUseFusedARCCalls())
if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
@@ -1165,6 +1363,44 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.EmitARCAutoreleaseReturnValue(result);
}
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return 0;
+ llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
+ if (!store) return 0;
+ if (store->getPointerOperand() != CGF.ReturnValue) return 0;
+ assert(!store->isAtomic() && !store->isVolatile()); // see below
+ return store;
+ }
+
+ llvm::StoreInst *store =
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+ if (!store) return 0;
+
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!store->isAtomic() && !store->isVolatile());
+
+ // Now do a first-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return 0;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
+
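
The loop at the end of the helper is the entire "first-and-dirty" dominance check; isolated, it reads (hypothetical helper name):

static bool dominatesAlongSinglePreds(llvm::BasicBlock *StoreBB,
                                      llvm::BasicBlock *IP) {
  // Walk the unique-predecessor chain upward from the insertion block;
  // reaching StoreBB proves the store executes on every path to here.
  while (IP != StoreBB) {
    IP = IP->getSinglePredecessor();
    if (!IP)
      return false; // the chain forked or hit the entry block: give up
  }
  return true;
}
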
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1199,16 +1435,9 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
- } else {
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
// Get the stored value and nuke the now-dead store.
RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
@@ -1219,6 +1448,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
ReturnValue = 0;
}
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
}
} else {
llvm::Value *V = ReturnValue;
@@ -1236,7 +1469,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// In ARC, end functions that return a retainable type with a call
// to objc_autoreleaseReturnValue.
if (AutoreleaseResult) {
- assert(getLangOptions().ObjCAutoRefCount &&
+ assert(getLangOpts().ObjCAutoRefCount &&
!FI.isReturnsRetained() &&
RetTy->isObjCRetainableType());
RV = emitAutoreleaseOfResult(*this, RV);
@@ -1431,7 +1664,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
- assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -1450,14 +1683,23 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
- type, /*NeedsCopy*/true);
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
return;
}
args.add(EmitAnyExprToTemp(E), type);
}
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
@@ -1465,14 +1707,22 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
if (!InvokeDest)
- return Builder.CreateCall(Callee, Args, Name);
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
- llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
- llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
- Args, Name);
- EmitBlock(ContBB);
- return Invoke;
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return Inst;
}
llvm::CallSite
@@ -1501,8 +1751,11 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
LValue LV = MakeAddrLValue(EltAddr, EltTy);
RValue EltRV;
- if (CodeGenFunction::hasAggregateLLVMType(EltTy))
- EltRV = RValue::getAggregate(LV.getAddress());
+ if (EltTy->isAnyComplexType())
+ // FIXME: Volatile?
+ EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = LV.asAggregateRValue();
else
EltRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
@@ -1519,13 +1772,16 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
// FIXME: What are the right qualifiers here?
LValue LV = EmitLValueForField(Addr, FD, 0);
RValue FldRV;
- if (CodeGenFunction::hasAggregateLLVMType(FT))
- FldRV = RValue::getAggregate(LV.getAddress());
+ if (FT->isAnyComplexType())
+ // FIXME: Volatile?
+ FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(FT))
+ FldRV = LV.asAggregateRValue();
else
FldRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
}
- } else if (isa<ComplexType>(Ty)) {
+ } else if (Ty->isAnyComplexType()) {
ComplexPairTy CV = RV.getComplexVal();
Args.push_back(CV.first);
Args.push_back(CV.second);
@@ -1639,6 +1895,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
@@ -1769,6 +2031,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRgen to discard
// unreachable code.
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 24ed366dd860..dead7bd45910 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -98,6 +98,55 @@ namespace CodeGen {
SmallVector<Writeback, 1> Writebacks;
};
+ /// A class for recording the number of arguments that a function
+ /// signature requires.
+ class RequiredArgs {
+ /// The number of required arguments, or ~0 if the signature does
+ /// not permit optional arguments.
+ unsigned NumRequired;
+ public:
+ enum All_t { All };
+
+ RequiredArgs(All_t _) : NumRequired(~0U) {}
+ explicit RequiredArgs(unsigned n) : NumRequired(n) {
+ assert(n != ~0U);
+ }
+
+ /// Compute the arguments required by the given formal prototype,
+ /// given that there may be some additional, non-formal arguments
+ /// in play.
+ static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
+ unsigned additional) {
+ if (!prototype->isVariadic()) return All;
+ return RequiredArgs(prototype->getNumArgs() + additional);
+ }
+
+ static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
+ return forPrototypePlus(prototype, 0);
+ }
+
+ static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
+ return forPrototype(prototype.getTypePtr());
+ }
+
+ static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
+ unsigned additional) {
+ return forPrototypePlus(prototype.getTypePtr(), additional);
+ }
+
+ bool allowsOptionalArgs() const { return NumRequired != ~0U; }
+    unsigned getNumRequiredArgs() const {
+ assert(allowsOptionalArgs());
+ return NumRequired;
+ }
+
+ unsigned getOpaqueData() const { return NumRequired; }
+ static RequiredArgs getFromOpaqueData(unsigned value) {
+ if (value == ~0U) return All;
+ return RequiredArgs(value);
+ }
+ };
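
A small usage sketch of the class (hypothetical helper; ~0U stands for "every argument is required"):

static unsigned requiredArgCount(const FunctionProtoType *proto) {
  RequiredArgs req = RequiredArgs::forPrototype(proto);
  // For 'int printf(const char *, ...)' this yields 1; a non-variadic
  // prototype permits no optional arguments at all.
  return req.allowsOptionalArgs() ? req.getNumRequiredArgs() : ~0U;
}
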
+
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
@@ -114,50 +163,71 @@ namespace CodeGen {
/// The LLVM::CallingConv to use for this function (as specified by the
/// user).
- unsigned CallingConvention;
+ unsigned CallingConvention : 8;
/// The LLVM::CallingConv to actually use for this function, which may
/// depend on the ABI.
- unsigned EffectiveCallingConvention;
+ unsigned EffectiveCallingConvention : 8;
+
+ /// The clang::CallingConv that this was originally created with.
+ unsigned ASTCallingConvention : 8;
/// Whether this function is noreturn.
- bool NoReturn;
+ unsigned NoReturn : 1;
/// Whether this function is returns-retained.
- bool ReturnsRetained;
+ unsigned ReturnsRetained : 1;
+
+ /// How many arguments to pass inreg.
+ unsigned HasRegParm : 1;
+ unsigned RegParm : 4;
+
+ RequiredArgs Required;
unsigned NumArgs;
- ArgInfo *Args;
+ ArgInfo *getArgsBuffer() {
+ return reinterpret_cast<ArgInfo*>(this+1);
+ }
+ const ArgInfo *getArgsBuffer() const {
+ return reinterpret_cast<const ArgInfo*>(this + 1);
+ }
- /// How many arguments to pass inreg.
- bool HasRegParm;
- unsigned RegParm;
+ CGFunctionInfo() : Required(RequiredArgs::All) {}
public:
+ static CGFunctionInfo *create(unsigned llvmCC,
+ const FunctionType::ExtInfo &extInfo,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required);
+
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- bool ReturnsRetained, bool HasRegParm, unsigned RegParm,
- CanQualType ResTy,
- const CanQualType *ArgTys, unsigned NumArgTys);
- ~CGFunctionInfo() { delete[] Args; }
-
- const_arg_iterator arg_begin() const { return Args + 1; }
- const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
- arg_iterator arg_begin() { return Args + 1; }
- arg_iterator arg_end() { return Args + 1 + NumArgs; }
+ const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
+ const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
+ arg_iterator arg_begin() { return getArgsBuffer() + 1; }
+ arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
unsigned arg_size() const { return NumArgs; }
+ bool isVariadic() const { return Required.allowsOptionalArgs(); }
+ RequiredArgs getRequiredArgs() const { return Required; }
+
bool isNoReturn() const { return NoReturn; }
- /// In ARR, whether this function retains its return value. This
+ /// In ARC, whether this function retains its return value. This
/// is not always reliable for call sites.
bool isReturnsRetained() const { return ReturnsRetained; }
- /// getCallingConvention - Return the user specified calling
+ /// getASTCallingConvention() - Return the AST-specified calling
/// convention.
+ CallingConv getASTCallingConvention() const {
+ return CallingConv(ASTCallingConvention);
+ }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention, which has been translated into an LLVM CC.
unsigned getCallingConvention() const { return CallingConvention; }
/// getEffectiveCallingConvention - Return the actual calling convention to
@@ -172,36 +242,44 @@ namespace CodeGen {
bool getHasRegParm() const { return HasRegParm; }
unsigned getRegParm() const { return RegParm; }
- CanQualType getReturnType() const { return Args[0].type; }
+ FunctionType::ExtInfo getExtInfo() const {
+ return FunctionType::ExtInfo(isNoReturn(),
+ getHasRegParm(), getRegParm(),
+ getASTCallingConvention(),
+ isReturnsRetained());
+ }
+
+ CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
- ABIArgInfo &getReturnInfo() { return Args[0].info; }
- const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+ ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddInteger(getCallingConvention());
+ ID.AddInteger(getASTCallingConvention());
ID.AddBoolean(NoReturn);
ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
+ ID.AddInteger(Required.getOpaqueData());
getReturnType().Profile(ID);
for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
it->type.Profile(ID);
}
- template<class Iterator>
static void Profile(llvm::FoldingSetNodeID &ID,
- const FunctionType::ExtInfo &Info,
- CanQualType ResTy,
- Iterator begin,
- Iterator end) {
- ID.AddInteger(Info.getCC());
- ID.AddBoolean(Info.getNoReturn());
- ID.AddBoolean(Info.getProducesResult());
- ID.AddBoolean(Info.getHasRegParm());
- ID.AddInteger(Info.getRegParm());
- ResTy.Profile(ID);
- for (; begin != end; ++begin) {
- CanQualType T = *begin; // force iterator to be over canonical types
- T.Profile(ID);
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes) {
+ ID.AddInteger(info.getCC());
+ ID.AddBoolean(info.getNoReturn());
+ ID.AddBoolean(info.getProducesResult());
+ ID.AddBoolean(info.getHasRegParm());
+ ID.AddInteger(info.getRegParm());
+ ID.AddInteger(required.getOpaqueData());
+ resultType.Profile(ID);
+ for (ArrayRef<CanQualType>::iterator
+ i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
+ i->Profile(ID);
}
}
};
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index c28ecc05ded6..6303e2069d58 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
@@ -95,7 +96,6 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
if (Offset.isPositive()) {
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
@@ -125,8 +125,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
BaseOffset = NonVirtualOffset;
// Apply the base offset.
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
return ThisPtr;
@@ -250,9 +249,9 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
}
// Apply the offset.
- Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
- Value = Builder.CreateSub(Value, NonVirtualOffset);
- Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+ Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
@@ -397,16 +396,16 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
BaseClassDecl,
isBaseVirtual);
-
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Qualifiers(),
+ AggValueSlot::forAddr(V, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.CGM.getLangOptions().Exceptions &&
+ if (CGF.CGM.getLangOpts().Exceptions &&
!BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
@@ -414,47 +413,59 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
+ Expr *Init,
llvm::Value *ArrayIndexVar,
- CXXCtorInitializer *MemberInit,
QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
unsigned Index) {
- if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::RunCleanupsScope Cleanups(CGF);
-
- llvm::Value *Dest = LHS.getAddress();
- if (ArrayIndexVar) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
- }
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+ { // Scope for Cleanups.
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
+ }
- if (!CGF.hasAggregateLLVMType(T)) {
- LValue lvalue = CGF.MakeAddrLValue(Dest, T);
- CGF.EmitScalarInit(MemberInit->getInit(), /*decl*/ 0, lvalue, false);
- } else if (T->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
- LHS.isVolatileQualified());
- } else {
- AggValueSlot Slot =
- AggValueSlot::forAddr(Dest, LHS.getQuals(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
+ LV.isVolatileQualified());
+ } else {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ }
}
-
+
+ // Now, outside of the initializer cleanup scope, destroy the backing array
+ // for a std::initializer_list member.
+ CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+
return;
}
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
llvm::Value *IndexVar
- = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
assert(IndexVar && "Array index variable not loaded");
// Initialize this index variable to zero.
@@ -490,8 +501,8 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
- Array->getElementType(), Index + 1);
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
}
CGF.EmitBlock(ContinueBlock);
@@ -511,19 +522,15 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
namespace {
struct CallMemberDtor : EHScopeStack::Cleanup {
- FieldDecl *Field;
+ llvm::Value *V;
CXXDestructorDecl *Dtor;
- CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
- : Field(Field), Dtor(Dtor) {}
+ CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
+ : V(V), Dtor(Dtor) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- // FIXME: Is this OK for C++0x delegating constructors?
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
-
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
+ V);
}
};
}
@@ -533,7 +540,7 @@ static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
return Moving ? Record->hasTrivialMoveConstructor() :
Record->hasTrivialCopyConstructor();
}
-
+
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXCtorInitializer *MemberInit,
@@ -545,7 +552,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// non-static data member initializers.
FieldDecl *Field = MemberInit->getAnyMember();
- QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
+ QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS;
@@ -559,67 +566,83 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
+      // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
+ LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ if (!hasAggregateLLVMType(FieldType)) {
if (LHS.isSimple()) {
- CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
+ EmitExprAsInit(Init, Field, LHS, false);
} else {
- RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS);
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
}
- } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
- LHS.isVolatileQualified());
+ } else if (FieldType->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
} else {
llvm::Value *ArrayIndexVar = 0;
- const ConstantArrayType *Array
- = CGF.getContext().getAsConstantArrayType(FieldType);
- if (Array && Constructor->isImplicitlyDefined() &&
- Constructor->isCopyOrMoveConstructor()) {
- llvm::Type *SizeTy
- = CGF.ConvertType(CGF.getContext().getSizeType());
+ if (ArrayIndexes.size()) {
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
- QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
- BasePtr);
- LHS = CGF.MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
- ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+ Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of PODs or classes with trivial copy
- // constructors, perform a single aggregate copy.
- const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
- if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && hasTrivialCopyOrMoveConstructor(Record,
- Constructor->isMoveConstructor()))) {
-      // Find the source pointer. We know it's the last argument because
- // we know we're in a copy constructor.
- unsigned SrcArgIndex = Args.size() - 1;
- llvm::Value *SrcPtr
- = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
- LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
-
- // Copy the aggregate.
- CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
- LHS.isVolatileQualified());
- return;
- }
// Emit the block variables for the array indices, if any.
- for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
- CGF.EmitAutoVarDecl(*MemberInit->getArrayIndex(I));
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
}
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
- if (!CGF.CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -631,8 +654,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor())
- CGF.EHStack.pushCleanup<CallMemberDtor>(EHCleanup, Field,
- RD->getDestructor());
+ EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
+ RD->getDestructor());
}
}
@@ -708,6 +731,9 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
// Emit the constructor prologue, i.e. the base and member
// initializers.
EmitCtorPrologue(Ctor, CtorType, Args);
@@ -912,7 +938,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
// -fapple-kext must inline any call to this dtor into
// the caller's body.
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
break;
}
@@ -940,13 +966,13 @@ namespace {
class DestroyField : public EHScopeStack::Cleanup {
const FieldDecl *field;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : field(field), destroyer(*destroyer),
+ : field(field), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -1039,6 +1065,10 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
QualType::DestructionKind dtorKind = type.isDestructedType();
if (!dtorKind) continue;
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
CleanupKind cleanupKind = getCleanupKind(dtorKind);
EHStack.pushCleanup<DestroyField>(cleanupKind, field,
getDestroyer(dtorKind),
@@ -1145,7 +1175,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Evaluate the constructor and its arguments in a regular
// partial-destroy cleanup.
- if (getLangOptions().Exceptions &&
+ if (getLangOpts().Exceptions &&
!ctor->getParent()->hasTrivialDestructor()) {
Destroyer *destroyer = destroyCXXObject;
pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
@@ -1192,8 +1222,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGDebugInfo *DI = getDebugInfo();
if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
- // If debug info for this class has been emitted then this is the right time
- // to do so.
+ // If debug info for this class has not been emitted then this is the
+ // right time to do so.
const CXXRecordDecl *Parent = D->getParent();
DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
Parent->getLocation());
@@ -1273,7 +1303,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
ReturnValueSlot(), Args, D);
}
@@ -1309,7 +1339,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
ReturnValueSlot(), DelegateArgs, Ctor);
}
@@ -1338,8 +1368,10 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
+ QualType Ty = getContext().getTagDeclType(Ctor->getParent());
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Qualifiers(),
+ AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1347,7 +1379,7 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
const CXXRecordDecl *ClassDecl = Ctor->getParent();
- if (CGM.getLangOptions().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
CXXDtorType Type =
CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
@@ -1364,7 +1396,7 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
llvm::Value *Callee = 0;
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
DD->getParent());
@@ -1485,7 +1517,8 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
llvm::Type *AddressPointPtrTy =
VTableAddressPoint->getType()->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
- Builder.CreateStore(VTableAddressPoint, VTableField);
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
void
@@ -1568,7 +1601,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
llvm::Type *Ty) {
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
- return Builder.CreateLoad(VTablePtrSrc, "vtable");
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ return VTable;
}
static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
@@ -1682,7 +1717,7 @@ static bool UseVirtualCall(ASTContext &Context,
// When building with -fapple-kext, all calls must go through the vtable since
// the kernel linker can do runtime patching of vtables.
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return true;
return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
@@ -1692,13 +1727,110 @@ llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
llvm::Value *This) {
- const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
+ llvm::FunctionType *fnType =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodDeclaration(MD));
if (UseVirtualCall(getContext(), E, MD))
- return BuildVirtualCall(MD, This, Ty);
+ return BuildVirtualCall(MD, This, fnType);
+
+ return CGM.GetAddrOfFunction(MD, fnType);
+}
+
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs) {
+ // Look up the lambda's call operator.
+ DeclarationName Name
+ = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
+ CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
+ const FunctionProtoType *FPT =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the address of the call operator.
+ GlobalDecl GD(CallOperator);
+ const CGFunctionInfo &CalleeFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+
+ // Forward the returned value
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for the forwarding call.
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (BlockDecl::param_const_iterator I = BD->param_begin(),
+ E = BD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for the forwarding call.
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
- return CGM.GetAddrOfFunction(MD, Ty);
+ EmitLambdaDelegatingInvokeBody(MD);
}
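
The new Emit*Lambda* helpers above implement the C++11 lambda-to-function-pointer and lambda-to-block conversions by synthesizing a thin body that forwards to the lambda's call operator; the delegating invoker even passes an undef 'this', since a captureless lambda's operator() cannot touch it. A hypothetical source-level example of the conversion this code serves:

// A captureless lambda converts to a plain function pointer; the
// conversion is backed by a synthesized static invoker whose body is
// roughly "return operator()(<undef this>, forwarded args...)", which
// is what EmitLambdaDelegatingInvokeBody constructs.
int main() {
  auto add = [](int a, int b) { return a + b; };
  int (*fp)(int, int) = add; // uses the synthesized static invoker
  return fp(2, 3);
}
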
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index b2d0786cb6cd..b00e2a21bf77 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -84,7 +84,6 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
}
llvm_unreachable("bad saved r-value kind");
- return RValue();
}
/// Push an entry of the given size onto this protected-scope stack.
@@ -251,8 +250,7 @@ void CodeGenFunction::initFullExprCleanup() {
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
- llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- new llvm::StoreInst(Builder.getFalse(), active, &block->back());
+ setBeforeOutermostConditional(Builder.getFalse(), active);
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), active);
@@ -504,9 +502,9 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
// The only uses should be fixup switches.
llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
- if (si->getNumCases() == 2 && si->getDefaultDest() == unreachableBB) {
+ if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
// Replace the switch with a branch.
- llvm::BranchInst::Create(si->getSuccessor(1), si);
+ llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
// The switch operand is a load from the cleanup-dest alloca.
llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
@@ -1000,57 +998,75 @@ enum ForActivation_t {
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
EHScopeStack::stable_iterator C,
- ForActivation_t Kind) {
+ ForActivation_t kind,
+ llvm::Instruction *dominatingIP) {
EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
- // We always need the flag if we're activating the cleanup, because
- // we have to assume that the current location doesn't necessarily
- // dominate all future uses of the cleanup.
- bool NeedFlag = (Kind == ForActivation);
+ // We always need the flag if we're activating the cleanup in a
+ // conditional context, because we have to assume that the current
+ // location doesn't necessarily dominate the cleanup's code.
+ bool isActivatedInConditional =
+ (kind == ForActivation && CGF.isInConditionalBranch());
+
+ bool needFlag = false;
// Calculate whether the cleanup was used:
// - as a normal cleanup
- if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
+ if (Scope.isNormalCleanup() &&
+ (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
Scope.setTestFlagInNormalCleanup();
- NeedFlag = true;
+ needFlag = true;
}
// - as an EH cleanup
- if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
+ if (Scope.isEHCleanup() &&
+ (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
Scope.setTestFlagInEHCleanup();
- NeedFlag = true;
+ needFlag = true;
}
// If it hasn't yet been used as either, we're done.
- if (!NeedFlag) return;
+ if (!needFlag) return;
- llvm::AllocaInst *Var = Scope.getActiveFlag();
- if (!Var) {
- Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
- Scope.setActiveFlag(Var);
+ llvm::AllocaInst *var = Scope.getActiveFlag();
+ if (!var) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(var);
+
+ assert(dominatingIP && "no existing variable and no dominating IP!");
// Initialize to true or false depending on whether it was
// active up to this point.
- CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
+ llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+
+ // If we're in a conditional block, ignore the dominating IP and
+ // use the outermost conditional branch.
+ if (CGF.isInConditionalBranch()) {
+ CGF.setBeforeOutermostConditional(value, var);
+ } else {
+ new llvm::StoreInst(value, var, dominatingIP);
+ }
}
- CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}
/// Activate a cleanup that was created in an inactive state.
-void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
assert(C != EHStack.stable_end() && "activating bottom of stack?");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
assert(!Scope.isActive() && "double activation");
- SetupCleanupBlockActivation(*this, C, ForActivation);
+ SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);
Scope.setActive(true);
}
/// Deactivate a cleanup that was created in an active state.
-void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
assert(Scope.isActive() && "double deactivation");
@@ -1066,7 +1082,7 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
}
// Otherwise, follow the general case.
- SetupCleanupBlockActivation(*this, C, ForDeactivation);
+ SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);
Scope.setActive(false);
}
@@ -1077,3 +1093,11 @@ llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
return NormalCleanupDest;
}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ QualType TempType,
+ llvm::Value *Ptr) {
+ pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+ /*useEHCleanup*/ true);
+}
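
The activation changes above thread an explicit dominating instruction into SetupCleanupBlockActivation so the i1 "cleanup.isactive" flag is always initialized at a point that dominates every later test of it. A hand-written C++ analogue of the pattern the flag machinery emits (illustrative only, not generated output):

#include <cstdio>

// The store that initializes the flag must dominate every test, which is
// why ActivateCleanupBlock/DeactivateCleanupBlock now take a dominating IP.
void sketch(bool cond) {
  bool cleanup_isactive = false; // initialized at the dominating point
  if (cond)
    cleanup_isactive = true;     // ActivateCleanupBlock flips the flag
  // ... code that may fall into the cleanup ...
  if (cleanup_isactive)          // the cleanup tests the flag before running
    std::puts("running cleanup");
}
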
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index c7a9b407d264..7301d2060ba4 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -35,7 +35,6 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -75,7 +74,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(LB);
llvm::DIDescriptor D
= DBuilder.createLexicalBlockFile(LBF.getScope(),
- getOrCreateFile(CurLoc));
+ getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
LexicalBlockStack.push_back(N);
@@ -118,12 +117,25 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
assert (FD && "Invalid FunctionDecl!");
IdentifierInfo *FII = FD->getIdentifier();
- if (FII)
+ FunctionTemplateSpecializationInfo *Info
+ = FD->getTemplateSpecializationInfo();
+ if (!Info && FII)
return FII->getName();
// Otherwise construct human readable name for debug info.
std::string NS = FD->getNameAsString();
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ NS += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
// Copy this name on the side and use its reference.
char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
memcpy(StrPtr, NS.data(), NS.length());
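
With this hunk, the debug-info name of a function template specialization carries its template arguments, so a debugger can tell instantiations apart. A hypothetical example of what changes:

// Each instantiation now gets a distinct debug name such as "twice<int>"
// and "twice<double>" rather than the bare "twice".
template <typename T> T twice(T v) { return v + v; }

int main() {
  return twice<int>(1) + static_cast<int>(twice<double>(1.5));
}
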
@@ -131,7 +143,7 @@ StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
}
StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
- llvm::SmallString<256> MethodName;
+ SmallString<256> MethodName;
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
@@ -164,8 +176,8 @@ StringRef CGDebugInfo::getSelectorName(Selector S) {
/// getClassName - Get class name including template argument list.
StringRef
-CGDebugInfo::getClassName(RecordDecl *RD) {
- ClassTemplateSpecializationDecl *Spec
+CGDebugInfo::getClassName(const RecordDecl *RD) {
+ const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD);
if (!Spec)
return RD->getName();
@@ -184,7 +196,7 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
NumArgs = TemplateArgs.size();
}
Buffer = RD->getIdentifier()->getNameStart();
- PrintingPolicy Policy(CGM.getLangOptions());
+ PrintingPolicy Policy(CGM.getLangOpts());
Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
NumArgs,
Policy);
@@ -223,7 +235,6 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
DIFileCache[fname] = F;
return F;
-
}
/// getOrCreateMainFile - Get the file info for main compile unit.
@@ -234,7 +245,8 @@ llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
- assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getLine() : 0;
@@ -243,16 +255,20 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
/// getColumnNumber - Get column number for the location. If location is
/// invalid then use current location.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
- assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getColumn() : 0;
}
StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
+ return CGM.getCodeGenOpts().DebugCompilationDir;
+
if (!CWDName.empty())
return CWDName;
- llvm::SmallString<256> CWD;
+ SmallString<256> CWD;
llvm::sys::fs::current_path(CWD);
char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
memcpy(CompDirnamePtr, CWD.data(), CWD.size());
@@ -285,7 +301,7 @@ void CGDebugInfo::CreateCompileUnit() {
StringRef Filename(FilenamePtr, MainFileName.length());
unsigned LangTag;
- const LangOptions &LO = CGM.getLangOptions();
+ const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
if (LO.ObjC1)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
@@ -321,35 +337,32 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
const char *BTName = NULL;
switch (BT->getKind()) {
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
- llvm_unreachable("Unexpected builtin type Dependent");
- case BuiltinType::Overload:
- llvm_unreachable("Unexpected builtin type Overload");
- case BuiltinType::BoundMember:
- llvm_unreachable("Unexpected builtin type BoundMember");
- case BuiltinType::UnknownAny:
- llvm_unreachable("Unexpected builtin type UnknownAny");
+ llvm_unreachable("Unexpected builtin type");
case BuiltinType::NullPtr:
return DBuilder.
- createNullPtrType(BT->getName(CGM.getContext().getLangOptions()));
+ createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DBuilder.createStructType(TheCU, "objc_class",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
// Class isa;
// } *id;
- llvm::DIType OCTy =
- DBuilder.createStructType(TheCU, "objc_class",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ // TODO: Cache these two types to avoid duplicates.
+ llvm::DIType OCTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
@@ -367,10 +380,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
0, 0, 0, 0, Elements);
}
case BuiltinType::ObjCSel: {
- return DBuilder.createStructType(TheCU, "objc_selector",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ return
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_selector", getOrCreateMainFile(),
+ 0);
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -403,7 +416,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ULong: BTName = "long unsigned int"; break;
case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
default:
- BTName = BT->getName(CGM.getContext().getLangOptions());
+ BTName = BT->getName(CGM.getContext().getLangOpts());
break;
}
// Bit size, align and offset of the type.
@@ -479,34 +492,82 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
Ty->getPointeeType(), Unit);
}
+// Creates a forward declaration for a RecordDecl in the given context.
+llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
+ llvm::DIDescriptor Ctx) {
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ // Get the tag.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ unsigned Tag = 0;
+ if (CXXDecl) {
+ RDName = getClassName(RD);
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+ else if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else
+ llvm_unreachable("Unknown RecordDecl type!");
+
+ // Create the type.
+ return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+}
+
+// Walk up the context chain and create forward decls for record decls,
+// and normal descriptors for namespaces.
+llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ // See if we already have the parent.
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(Context)) {
+ if (!RD->isDependentType()) {
+ llvm::DIType Ty = getOrCreateLimitedType(CGM.getContext().getTypeDeclType(RD),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
+
/// CreatePointeeType - Create Pointee type. If Pointee is a record
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
if (!CGM.getCodeGenOpts().LimitDebugInfo)
return getOrCreateType(PointeeTy, Unit);
-
+
+ // Limit debug info for the pointee type.
+
+ // If we have an existing type, use that; it's still smaller than creating
+ // a new type.
+ llvm::DIType Ty = getTypeOrNull(PointeeTy);
+ if (Ty.Verify()) return Ty;
+
+ // Handle qualifiers.
+ if (PointeeTy.hasLocalQualifiers())
+ return CreateQualifiedType(PointeeTy, Unit);
+
if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
RecordDecl *RD = RTy->getDecl();
- llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
llvm::DIDescriptor FDContext =
getContextDescriptor(cast<Decl>(RD->getDeclContext()));
-
- if (RD->isStruct())
- return DBuilder.createStructType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIArray());
- else if (RD->isUnion())
- return DBuilder.createUnionType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIArray());
- else {
- assert(RD->isClass() && "Unknown RecordType!");
- return DBuilder.createClassType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
- }
+ llvm::DIType RetTy = createRecordFwdDecl(RD, FDContext);
+ TypeCache[QualType(RTy, 0).getAsOpaquePtr()] = RetTy;
+ return RetTy;
}
return getOrCreateType(PointeeTy, Unit);
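
Under -flimit-debug-info, a record that is reached only through a pointer is now emitted as a cached forward declaration via createRecordFwdDecl rather than an ad-hoc struct/union/class node. A hypothetical source fragment where this kicks in:

// With debug-info size reduction enabled, 'Big' is referenced here only
// through pointers, so its debug info can stay a forward declaration even
// though the definition is visible in this translation unit.
struct Big { int payload[64]; };

void consume(Big *b);
void forward(Big *b) { consume(b); }
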
@@ -516,7 +577,6 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
-
if (Tag == llvm::dwarf::DW_TAG_reference_type)
return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
@@ -527,8 +587,8 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return
- DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit), Size, Align);
+ return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
+ Size, Align);
}
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
@@ -594,8 +654,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
return BlockLiteralGeneric;
}
-llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
@@ -605,11 +664,12 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
const TypedefNameDecl *TyDecl = Ty->getDecl();
- llvm::DIDescriptor TydefContext =
+
+ llvm::DIDescriptor TypedefContext =
getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
-
- return
- DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TydefContext);
+
+ return
+ DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
@@ -634,6 +694,34 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
return DbgTy;
}
+
+void CGDebugInfo::
+CollectRecordStaticVars(const RecordDecl *RD, llvm::DIType FwdDecl) {
+
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (V->getInit()) {
+ const APValue *Value = V->evaluateValue();
+ if (Value && Value->isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
+ }
+ }
+ }
+ }
+}
+
llvm::DIType CGDebugInfo::createFieldType(StringRef name,
QualType type,
uint64_t sizeInBitsOverride,
@@ -674,44 +762,75 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
SmallVectorImpl<llvm::Value *> &elements,
llvm::DIType RecordTy) {
unsigned fieldNo = 0;
- const FieldDecl *LastFD = 0;
- bool IsMsStruct = record->hasAttr<MsStructAttr>();
-
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
- for (RecordDecl::field_iterator I = record->field_begin(),
- E = record->field_end();
- I != E; ++I, ++fieldNo) {
- FieldDecl *field = *I;
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are ignored
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
- --fieldNo;
- continue;
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
+
+ // For C++11 lambdas, a field will be the same as a capture, but the capture
+ // has the name and the location of the variable, so we should iterate over
+ // both concurrently.
+ if (CXXDecl && CXXDecl->isLambda()) {
+ RecordDecl::field_iterator Field = CXXDecl->field_begin();
+ unsigned fieldno = 0;
+ for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
+ E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ const LambdaExpr::Capture C = *I;
+ // TODO: Need to handle 'this' in some way, probably by renaming the
+ // 'this' of the lambda class and having a field member for 'this'.
+ if (C.capturesVariable()) {
+ VarDecl *V = C.getCapturedVar();
+ llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
+ StringRef VName = V->getName();
+ uint64_t SizeInBitsOverride = 0;
+ if (Field->isBitField()) {
+ SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+ llvm::DIType fieldType
+ = createFieldType(VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno),
+ VUnit, RecordTy);
+ elements.push_back(fieldType);
}
- LastFD = field;
}
+ } else {
+ bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ const FieldDecl *LastFD = 0;
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end();
+ I != E; ++I, ++fieldNo) {
+ FieldDecl *field = *I;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
+ --fieldNo;
+ continue;
+ }
+ LastFD = field;
+ }
- StringRef name = field->getName();
- QualType type = field->getType();
+ StringRef name = field->getName();
+ QualType type = field->getType();
- // Ignore unnamed fields unless they're anonymous structs/unions.
- if (name.empty() && !type->isRecordType()) {
- LastFD = field;
- continue;
- }
+ // Ignore unnamed fields unless they're anonymous structs/unions.
+ if (name.empty() && !type->isRecordType()) {
+ LastFD = field;
+ continue;
+ }
- uint64_t SizeInBitsOverride = 0;
- if (field->isBitField()) {
- SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
- assert(SizeInBitsOverride && "found named 0-width bitfield");
- }
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
- llvm::DIType fieldType
- = createFieldType(name, type, SizeInBitsOverride,
- field->getLocation(), field->getAccess(),
- layout.getFieldOffset(fieldNo), tunit, RecordTy);
+ llvm::DIType fieldType
+ = createFieldType(name, type, SizeInBitsOverride,
+ field->getLocation(), field->getAccess(),
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
- elements.push_back(fieldType);
+ elements.push_back(fieldType);
+ }
}
}
@@ -725,7 +844,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
= getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
0),
Unit);
-
+
// Add "this" pointer.
llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
@@ -738,11 +857,29 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
if (!Method->isStatic()) {
// "this" pointer is always first argument.
QualType ThisPtr = Method->getThisType(CGM.getContext());
- llvm::DIType ThisPtrType =
- DBuilder.createArtificialType(getOrCreateType(ThisPtr, Unit));
-
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- Elts.push_back(ThisPtrType);
+
+ const CXXRecordDecl *RD = Method->getParent();
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ // Create pointer type directly in this case.
+ const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
+ QualType PointeeTy = ThisPtrTy->getPointeeType();
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ // TODO: This and the artificial type below are misleading; the
+ // types aren't artificial, the argument is, but the current
+ // metadata doesn't represent that.
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ } else {
+ llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ }
}
// Copy rest of the arguments.
@@ -757,14 +894,13 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
/// inside a function.
static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
- if (const CXXRecordDecl *NRD =
- dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
return isFunctionLocalClass(NRD);
- else if (isa<FunctionDecl>(RD->getDeclContext()))
+ if (isa<FunctionDecl>(RD->getDeclContext()))
return true;
return false;
-
}
+
/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
/// a single member function GlobalDecl.
llvm::DISubprogram
@@ -823,35 +959,48 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
}
if (Method->hasPrototype())
Flags |= llvm::DIDescriptor::FlagPrototyped;
-
+
+ llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram SP =
DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
/* isDefinition=*/ false,
Virtuality, VIndex, ContainingType,
- Flags, CGM.getLangOptions().Optimize);
+ Flags, CGM.getLangOpts().Optimize, NULL,
+ TParamsArray);
- SPCache[Method] = llvm::WeakVH(SP);
+ SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
return SP;
}
/// CollectCXXMemberFunctions - A helper function to collect debug info for
-/// C++ member functions.This is used while creating debug info entry for
+/// C++ member functions. This is used while creating the debug info entry for
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
- for(CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *Method = *I;
-
- if (Method->isImplicit() && !Method->isUsed())
+
+ // Since we want more than just the individual member decls if we
+ // have templated functions, iterate over every declaration to gather
+ // the functions.
+ for(DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end(); I != E; ++I) {
+ Decl *D = *I;
+ if (D->isImplicit() && !D->isUsed())
continue;
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
+ SE = FTD->spec_end(); SI != SE; ++SI) {
+ FunctionDecl *FD = *SI;
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
+ EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
+ }
}
}
@@ -862,7 +1011,7 @@ void CGDebugInfo::
CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
- for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
BE = RD->friend_end(); BI != BE; ++BI) {
if ((*BI)->isUnsupportedFriend())
continue;
@@ -992,7 +1141,7 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
/// getVTableName - Get vtable name for the given Class.
StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
- // Otherwise construct gdb compatible name name.
+ // Construct a gdb-compatible name.
std::string Name = "_vptr$" + RD->getNameAsString();
// Copy this name on the side and use its reference.
@@ -1028,7 +1177,15 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
- llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ return T;
+}
+
+/// getOrCreateInterfaceType - Emit standalone debug info for an Objective-C
+/// interface type.
+llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
}
@@ -1036,11 +1193,9 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// CreateType - get structure or union type.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
- llvm::DIFile Unit = getOrCreateFile(RD->getLocation());
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
// Records and classes and unions can all be recursive. To handle them, we
// first generate a debug descriptor for the struct as a forward declaration.
@@ -1048,126 +1203,63 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIDescriptor FDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- // If this is just a forward declaration, construct an appropriately
- // marked node and just return it.
- if (!RD->getDefinition()) {
- llvm::DIType FwdDecl =
- DBuilder.createStructType(FDContext, RD->getName(),
- DefUnit, Line, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ llvm::DIType FwdDecl = getOrCreateLimitedType(QualType(Ty, 0), DefUnit);
- return FwdDecl;
- }
+ if (FwdDecl.isForwardDecl())
+ return FwdDecl;
- llvm::DIType FwdDecl = DBuilder.createTemporaryType(DefUnit);
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(FwdDecl);
- llvm::MDNode *MN = FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
- // Otherwise, insert it into the TypeCache so that recursive uses will find
- // it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
LexicalBlockStack.push_back(FwdDeclNode);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ // Add this to the completed types cache since we're completing it.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
// Convert all the elements.
SmallVector<llvm::Value *, 16> EltTys;
+ // Note: The split of CXXDecl information here is intentional; the
+ // gdb tests depend on a certain ordering at printout. The debug
+ // information offsets are still correct if we merge them all together,
+ // though.
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
- CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
- CollectVTableInfo(CXXDecl, Unit, EltTys);
+ CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
}
-
- // Collect static variables with initializers.
- for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
- I != E; ++I)
- if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
- if (const Expr *Init = V->getInit()) {
- Expr::EvalResult Result;
- if (Init->Evaluate(Result, CGM.getContext()) && Result.Val.isInt()) {
- llvm::ConstantInt *CI
- = llvm::ConstantInt::get(CGM.getLLVMContext(), Result.Val.getInt());
-
- // Create the descriptor for static variable.
- llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
- StringRef VName = V->getName();
- llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
- // Do not use DIGlobalVariable for enums.
- if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
- DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
- getLineNumber(V->getLocation()),
- VTy, true, CI);
- }
- }
- }
- }
- CollectRecordFields(RD, Unit, EltTys, FwdDecl);
+ // Collect static variables with initializers and other fields.
+ CollectRecordStaticVars(RD, FwdDecl);
+ CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
llvm::DIArray TParamsArray;
if (CXXDecl) {
- CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
- CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, DefUnit, EltTys, FwdDecl);
if (const ClassTemplateSpecializationDecl *TSpecial
= dyn_cast<ClassTemplateSpecializationDecl>(RD))
- TParamsArray = CollectCXXTemplateParams(TSpecial, Unit);
+ TParamsArray = CollectCXXTemplateParams(TSpecial, DefUnit);
}
LexicalBlockStack.pop_back();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
- RegionMap.find(Ty->getDecl());
- if (RI != RegionMap.end())
- RegionMap.erase(RI);
+ RegionMap.erase(Ty->getDecl());
- llvm::DIDescriptor RDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- StringRef RDName = RD->getName();
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- llvm::MDNode *RealDecl = NULL;
-
+ // FIXME: Magic numbers ahoy! These should be changed when we
+ // get some enums in llvm/Analysis/DebugInfo.h to refer to
+ // them.
if (RD->isUnion())
- RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
+ FwdDeclNode->replaceOperandWith(10, Elements);
else if (CXXDecl) {
- RDName = getClassName(RD);
- // A class's primary base or the class itself contains the vtable.
- llvm::MDNode *ContainingType = NULL;
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
- // Seek non virtual primary base root.
- while (1) {
- const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
- const CXXRecordDecl *PBT = BRL.getPrimaryBase();
- if (PBT && !BRL.isPrimaryBaseVirtual())
- PBase = PBT;
- else
- break;
- }
- ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
- }
- else if (CXXDecl->isDynamicClass())
- ContainingType = FwdDecl;
-
- RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, 0, llvm::DIType(),
- Elements, ContainingType,
- TParamsArray);
- } else
- RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ FwdDeclNode->replaceOperandWith(13, TParamsArray);
+ } else
+ FwdDeclNode->replaceOperandWith(10, Elements);
- // Now that we have a real decl for the struct, replace anything using the
- // old decl with the new one. This will recursively update the debug info.
- llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[RD] = llvm::WeakVH(RealDecl);
- return llvm::DIType(RealDecl);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDeclNode);
+ return llvm::DIType(FwdDeclNode);
}
/// CreateType - get objective-c object type.
@@ -1191,30 +1283,38 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// If this is just a forward declaration return a special forward-declaration
// debug type since we won't be able to lay out the entire type.
- if (ID->isForwardDecl()) {
+ ObjCInterfaceDecl *Def = ID->getDefinition();
+ if (!Def) {
llvm::DIType FwdDecl =
- DBuilder.createStructType(Unit, ID->getName(),
- DefUnit, Line, 0, 0, 0,
- llvm::DIArray(), RuntimeLang);
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(), DefUnit, Line,
+ RuntimeLang);
return FwdDecl;
}
- // To handle a recursive interface, we first generate a debug descriptor
- // for the struct as a forward declaration. Then (if it is a definition)
- // we go through and get debug info for all of its members. Finally, we
- // create a descriptor for the complete type (which may refer to the
- // forward decl if the struct is recursive) and replace all uses of the
- // forward declaration with the final definition.
- llvm::DIType FwdDecl = DBuilder.createTemporaryType(DefUnit);
-
- llvm::MDNode *MN = FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
- // Otherwise, insert it into the TypeCache so that recursive uses will find
- // it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ ID = Def;
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
+
+ llvm::DIType RealDecl =
+ DBuilder.createStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, Flags,
+ llvm::DIArray(), RuntimeLang);
+
+ // Otherwise, insert it into the CompletedTypeCache so that recursive uses
+ // will find it, as we're emitting the complete type.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
// Push the struct on region stack.
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(RealDecl);
+
LexicalBlockStack.push_back(FwdDeclNode);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
// Convert all the elements.
SmallVector<llvm::Value *, 16> EltTys;
@@ -1227,12 +1327,31 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return llvm::DIType();
llvm::DIType InhTag =
- DBuilder.createInheritance(FwdDecl, SClassTy, 0, 0);
+ DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
+ for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
+ E = ID->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ llvm::MDNode *PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ EltTys.push_back(PropertyNode);
+ }
+
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
- ObjCImplementationDecl *ImpD = ID->getImplementation();
unsigned FieldNo = 0;
for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
Field = Field->getNextIvar(), ++FieldNo) {
@@ -1266,7 +1385,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// the non-fragile abi and the debugger should ignore the value anyways.
// Call it the FieldNo+1 due to how debuggers use the information,
// e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOptions().ObjCNonFragileABI ? FieldNo+1
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
: RL.getFieldOffset(FieldNo);
unsigned Flags = 0;
@@ -1275,69 +1394,43 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
- StringRef PropertyName;
- StringRef PropertyGetter;
- StringRef PropertySetter;
- unsigned PropertyAttributes = 0;
- ObjCPropertyDecl *PD = NULL;
- if (ImpD)
+ llvm::MDNode *PropertyNode = NULL;
+ if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
if (ObjCPropertyImplDecl *PImpD =
- ImpD->FindPropertyImplIvarDecl(Field->getIdentifier()))
- PD = PImpD->getPropertyDecl();
- if (PD) {
- PropertyName = PD->getName();
- PropertyGetter = getSelectorName(PD->getGetterName());
- PropertySetter = getSelectorName(PD->getSetterName());
- PropertyAttributes = PD->getPropertyAttributes();
- }
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ }
+ }
+ }
FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
FieldLine, FieldSize, FieldAlign,
FieldOffset, Flags, FieldTy,
- PropertyName, PropertyGetter,
- PropertySetter, PropertyAttributes);
+ PropertyNode);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
LexicalBlockStack.pop_back();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
- RegionMap.find(Ty->getDecl());
- if (RI != RegionMap.end())
- RegionMap.erase(RI);
-
- // Bit size, align and offset of the type.
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
-
- unsigned Flags = 0;
- if (ID->getImplementation())
- Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
-
- llvm::DIType RealDecl =
- DBuilder.createStructType(Unit, ID->getName(), DefUnit,
- Line, Size, Align, Flags,
- Elements, RuntimeLang);
-
- // Now that we have a real decl for the struct, replace anything using the
- // old decl with the new one. This will recursively update the debug info.
- llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[ID] = llvm::WeakVH(RealDecl);
-
- return RealDecl;
-}
-
-llvm::DIType CGDebugInfo::CreateType(const TagType *Ty) {
- if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return CreateType(RT);
- else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return CreateEnumType(ET->getDecl());
-
- return llvm::DIType();
+ return llvm::DIType(FwdDeclNode);
}
-llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
int64_t NumElems = Ty->getNumElements();
int64_t LowerBound = 0;
@@ -1545,20 +1638,14 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
if (T == LastT)
return T;
} while (true);
-
- return T;
}
-/// getOrCreateType - Get the type from the cache or create a new
-/// one if necessary.
-llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
- llvm::DIFile Unit) {
- if (Ty.isNull())
- return llvm::DIType();
+/// getTypeOrNull - Get the type from the cache or return a null type if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty);
-
+
// Check for existing entry.
llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(Ty.getAsOpaquePtr());
@@ -1568,17 +1655,59 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
return llvm::DIType(cast<llvm::MDNode>(it->second));
}
+ return llvm::DIType();
+}
+
+/// getCompletedTypeOrNull - Get the type from the cache or return null if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ CompletedTypeCache.find(Ty.getAsOpaquePtr());
+ if (it != CompletedTypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getCompletedTypeOrNull(Ty);
+
+ if (T.Verify()) return T;
+
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
+ llvm::DIType TC = getTypeOrNull(Ty);
+ if (TC.Verify() && TC.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+
// And update the type cache.
- TypeCache[Ty.getAsOpaquePtr()] = Res;
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+
+ if (!Res.isForwardDecl())
+ CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
return Res;
}
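
getOrCreateType now consults two caches: TypeCache may hold forward declarations, CompletedTypeCache only ever holds fully built types, and a superseded forward declaration is queued in ReplaceMap so it can be replaced later. A hand-rolled analogue of that lookup discipline (illustrative types, not the clang ones):

#include <map>
#include <utility>
#include <vector>

struct TypeNode { bool isFwdDecl; };

// Two-level cache: prefer a completed entry; remember any stale forward
// declaration so it can be replaced with the real definition afterwards.
struct TwoLevelCache {
  std::map<void *, TypeNode *> all;       // may contain forward decls
  std::map<void *, TypeNode *> completed; // definitions only
  std::vector<std::pair<void *, TypeNode *> > replaceLater;

  TypeNode *getOrCreate(void *key, TypeNode *(*create)(void *)) {
    std::map<void *, TypeNode *>::iterator it = completed.find(key);
    if (it != completed.end())
      return it->second;                  // fast path: already complete
    TypeNode *fresh = create(key);
    it = all.find(key);
    if (it != all.end() && it->second->isFwdDecl)
      replaceLater.push_back(std::make_pair(key, it->second));
    all[key] = fresh;
    if (!fresh->isFwdDecl)
      completed[key] = fresh;
    return fresh;
  }
};
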
/// CreateTypeNode - Create a new debug type node.
-llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
// Handle qualifiers, which recursively handles what they refer to.
if (Ty.hasLocalQualifiers())
return CreateQualifiedType(Ty, Unit);
@@ -1603,15 +1732,20 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return CreateType(cast<ObjCObjectType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
- case Type::Builtin: return CreateType(cast<BuiltinType>(Ty));
- case Type::Complex: return CreateType(cast<ComplexType>(Ty));
- case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::Builtin:
+ return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex:
+ return CreateType(cast<ComplexType>(Ty));
+ case Type::Pointer:
+ return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
- case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Typedef:
+ return CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
+ return CreateType(cast<RecordType>(Ty));
case Type::Enum:
- return CreateType(cast<TagType>(Ty));
+ return CreateEnumType(cast<EnumType>(Ty)->getDecl());
case Type::FunctionProto:
case Type::FunctionNoProto:
return CreateType(cast<FunctionType>(Ty), Unit);
@@ -1642,7 +1776,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::UnaryTransform:
case Type::Auto:
llvm_unreachable("type should have been unwrapped!");
- return llvm::DIType();
}
assert(Diag && "Fall through without a diagnostic?");
@@ -1653,6 +1786,123 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return llvm::DIType();
}
+/// getOrCreateLimitedType - Get the type from the cache or create a new
+/// limited type if necessary.
+llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getTypeOrNull(Ty);
+
+ // We may have cached a forward decl when we could have created
+ // a non-forward decl. Go ahead and create a non-forward decl
+ // now.
+ if (T.Verify() && !T.isForwardDecl()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
+
+ if (T.Verify() && T.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+// TODO: Currently used for context chains when limiting debug info.
+llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ llvm::DIDescriptor RDContext;
+ if (CGM.getCodeGenOpts().LimitDebugInfo)
+ RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
+ else
+ RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition())
+ return createRecordFwdDecl(RD, RDContext);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ llvm::TrackingVH<llvm::MDNode> RealDecl;
+
+ if (RD->isUnion())
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+ else if (CXXDecl) {
+ RDName = getClassName(RD);
+
+ // FIXME: This could be a struct type giving a default visibility different
+ // than C++ class type, but needs llvm metadata changes first.
+ RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(),
+ llvm::DIArray());
+ } else
+ RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = llvm::DIType(RealDecl);
+
+ if (CXXDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek non virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = RealDecl;
+
+ RealDecl->replaceOperandWith(12, ContainingType);
+ }
+ return llvm::DIType(RealDecl);
+}
+
+/// CreateLimitedTypeNode - Create a new debug type node, but only forward
+/// declare composite types that haven't been processed yet.
+llvm::DIType CGDebugInfo::CreateLimitedTypeNode(QualType Ty, llvm::DIFile Unit) {
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+ #include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::Record:
+ return CreateLimitedType(cast<RecordType>(Ty));
+ default:
+ return CreateTypeNode(Ty, Unit);
+ }
+}
+
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name,
@@ -1677,7 +1927,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
getContextDescriptor(cast<Decl>(D->getDeclContext()));
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(FD);
+ MI = SPCache.find(FD->getCanonicalDecl());
if (MI != SPCache.end()) {
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
@@ -1688,7 +1938,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
E = FD->redecls_end(); I != E; ++I) {
const FunctionDecl *NextFD = *I;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(NextFD);
+ MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
@@ -1705,7 +1955,7 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
llvm::DIFile F) {
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
- else if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
// Add "self" and "_cmd"
SmallVector<llvm::Value *, 16> Elts;
@@ -1726,8 +1976,7 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
return getOrCreateType(FnType, F);
}
-/// EmitFunctionStart - Constructs the debug code for entering a function -
-/// "llvm.dbg.func.start.".
+/// EmitFunctionStart - Constructs the debug code for entering a function.
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn,
CGBuilderTy &Builder) {
@@ -1738,15 +1987,17 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(LexicalBlockStack.size());
const Decl *D = GD.getDecl();
+ // Use the location of the declaration.
+ SourceLocation Loc = D->getLocation();
unsigned Flags = 0;
- llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ llvm::DIFile Unit = getOrCreateFile(Loc);
llvm::DIDescriptor FDContext(Unit);
llvm::DIArray TParamsArray;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // If there is a DISubprogram for this function available then use it.
+ // If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- FI = SPCache.find(FD);
+ FI = SPCache.find(FD->getCanonicalDecl());
if (FI != SPCache.end()) {
llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
@@ -1758,12 +2009,13 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
Name = getFunctionName(FD);
    // Use the mangled name as the linkage name for C/C++ functions.
- if (!Fn->hasInternalLinkage())
+ if (FD->hasPrototype()) {
LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
if (LinkageName == Name)
LinkageName = StringRef();
- if (FD->hasPrototype())
- Flags |= llvm::DIDescriptor::FlagPrototyped;
+
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
@@ -1784,18 +2036,17 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
- // It is expected that CurLoc is set before using EmitFunctionStart.
- // Usually, CurLoc points to the left bracket location of compound
- // statement representing function body.
- unsigned LineNo = getLineNumber(CurLoc);
+ unsigned LineNo = getLineNumber(Loc);
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
+
llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
llvm::DISubprogram SP =
DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
LineNo, getOrCreateFunctionType(D, FnType, Unit),
Fn->hasInternalLinkage(), true/*definition*/,
- Flags, CGM.getLangOptions().Optimize, Fn,
+ getLineNumber(CurLoc),
+ Flags, CGM.getLangOpts().Optimize, Fn,
TParamsArray, SPDecl);
// Push function on region stack.
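To make the two names concrete: the display name is what a debugger shows, while the linkage name is the mangled symbol, now emitted (along with FlagPrototyped) whenever the function has a prototype rather than only for non-internal linkage. A hypothetical example:

    namespace n { int f(int x) { return x; } }
    // Name        = "f"          (what the debugger displays)
    // LinkageName = "_ZN1n1fEi"  (mangled symbol; dropped when it equals
    //                             Name, as it does for extern "C" functions)
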
@@ -1835,11 +2086,11 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
llvm::DIDescriptor D =
DBuilder.createLexicalBlock(LexicalBlockStack.empty() ?
- llvm::DIDescriptor() :
- llvm::DIDescriptor(LexicalBlockStack.back()),
- getOrCreateFile(CurLoc),
- getLineNumber(CurLoc),
- getColumnNumber(CurLoc));
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
LexicalBlockStack.push_back(DN);
}
@@ -1855,8 +2106,8 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc
// Emit a line table change for the current location inside the new scope.
Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
- getColumnNumber(Loc),
- LexicalBlockStack.back()));
+ getColumnNumber(Loc),
+ LexicalBlockStack.back()));
}
/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
@@ -1997,7 +2248,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = CGM.Int64Ty;
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
@@ -2017,7 +2268,6 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
addr, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- // Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
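The OpPlus/OpDeref sequence describes how a debugger reaches a __block variable through its byref wrapper: load the forwarding pointer, then offset to the field. A sketch of the layout being walked, with the field order simplified relative to the real blocks ABI struct:

    struct byref_x {               // hypothetical wrapper for '__block int x'
      void *isa;
      struct byref_x *forwarding;  // self-pointer on the stack; repointed to
                                   // the heap copy if the variable moves
      int flags, size;
      int x;                       // the captured variable itself
    };
    // expression: *(base + offsetof(byref_x, forwarding)) + offsetof(byref_x, x)
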
@@ -2027,7 +2277,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
Name, Unit, Line, Ty,
- CGM.getLangOptions().Optimize, Flags, ArgNo);
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
@@ -2056,7 +2306,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
FieldName, Unit, Line, FieldTy,
- CGM.getLangOptions().Optimize, Flags,
+ CGM.getLangOpts().Optimize, Flags,
ArgNo);
// Insert an llvm.dbg.declare into the current block.
@@ -2103,7 +2353,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = CGM.Int64Ty;
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
if (isByRef) {
@@ -2261,7 +2511,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
fields.push_back(fieldType);
}
- llvm::SmallString<36> typeName;
+ SmallString<36> typeName;
llvm::raw_svector_ostream(typeName)
<< "__block_literal_" << CGM.getUniqueBlockCount();
@@ -2284,7 +2534,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
llvm::DIDescriptor(scope),
name, tunit, line, type,
- CGM.getLangOptions().Optimize, flags,
+ CGM.getLangOpts().Optimize, flags,
cast<llvm::Argument>(addr)->getArgNo() + 1);
// Insert an llvm.dbg.value into the current block.
@@ -2297,7 +2547,6 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
-
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2314,7 +2563,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
- ArrayType::Normal, 0);
+ ArrayType::Normal, 0);
}
StringRef DeclName = D->getName();
StringRef LinkageName;
@@ -2395,16 +2644,25 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
return NS;
}
-/// UpdateCompletedType - Update type cache because the type is now
-/// translated.
-void CGDebugInfo::UpdateCompletedType(const TagDecl *TD) {
- QualType Ty = CGM.getContext().getTagDeclType(TD);
-
- // If the type exist in type cache then remove it from the cache.
- // There is no need to prepare debug info for the completed type
- // right now. It will be generated on demand lazily.
- llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
- TypeCache.find(Ty.getAsOpaquePtr());
- if (it != TypeCache.end())
- TypeCache.erase(it);
+void CGDebugInfo::finalize() {
+ for (std::vector<std::pair<void *, llvm::WeakVH> >::const_iterator VI
+ = ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) {
+ llvm::DIType Ty, RepTy;
+ // Verify that the debug info still exists.
+ if (&*VI->second)
+ Ty = llvm::DIType(cast<llvm::MDNode>(VI->second));
+
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(VI->first);
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ RepTy = llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
+ Ty.replaceAllUsesWith(RepTy);
+ }
+ }
+ DBuilder.finalize();
}
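The new finalize() completes the two-phase scheme introduced in this patch: limited, forward-declared record types are handed out while members are still being built, remembered in ReplaceMap, and patched over at the end of the translation unit. A minimal sketch of the pattern, assuming the DIBuilder API of this era:

    llvm::DIType Fwd =                       // cheap placeholder, cached in
      DBuilder.createForwardDecl(            // TypeCache like any other type
          llvm::dwarf::DW_TAG_structure_type, Name, TheCU, Unit, Line);
    ReplaceMap.push_back(std::make_pair(Key, llvm::WeakVH(Fwd)));
    // ... later, once the full definition exists ...
    llvm::DIType Def = DBuilder.createStructType(/* full definition */);
    Fwd.replaceAllUsesWith(Def);             // what finalize() does per entry
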
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index a4533a83d579..ec7705c05447 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -53,6 +53,13 @@ class CGDebugInfo {
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+  /// CompletedTypeCache - Cache of previously constructed complete RecordTypes.
+ llvm::DenseMap<void *, llvm::WeakVH> CompletedTypeCache;
+
+  /// ReplaceMap - Cache of forward-declared types to RAUW at the end of
+  /// compilation.
+  std::vector<std::pair<void *, llvm::WeakVH> > ReplaceMap;
+
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
@@ -83,8 +90,8 @@ class CGDebugInfo {
llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const TagType *Ty);
llvm::DIType CreateType(const RecordType *Ty);
+ llvm::DIType CreateLimitedType(const RecordType *Ty);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
@@ -94,6 +101,8 @@ class CGDebugInfo {
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType getTypeOrNull(const QualType);
+ llvm::DIType getCompletedTypeOrNull(const QualType);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
@@ -139,6 +148,7 @@ class CGDebugInfo {
AccessSpecifier AS, uint64_t offsetInBits,
llvm::DIFile tunit,
llvm::DIDescriptor scope);
+ void CollectRecordStaticVars(const RecordDecl *, llvm::DIType);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &E,
llvm::DIType RecordTy);
@@ -154,7 +164,8 @@ class CGDebugInfo {
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
- void finalize() { DBuilder.finalize(); }
+
+  void finalize();
/// setLocation - Update the current source location. If \arg loc is
/// invalid it is ignored.
@@ -172,10 +183,6 @@ public:
/// EmitFunctionEnd - Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder);
- /// UpdateCompletedType - Update type cache because the type is now
- /// translated.
- void UpdateCompletedType(const TagDecl *TD);
-
/// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a
/// new lexical block and push the block onto the stack.
void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
@@ -219,6 +226,12 @@ public:
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
+
+ /// getOrCreateInterfaceType - Emit an objective c interface type standalone
+ /// debug info.
+ llvm::DIType getOrCreateInterfaceType(QualType Ty,
+ SourceLocation Loc);
+
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
@@ -232,6 +245,13 @@ private:
/// getContextDescriptor - Get context info for the decl.
llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
+ /// createRecordFwdDecl - Create a forward decl for a RecordType in a given
+ /// context.
+ llvm::DIType createRecordFwdDecl(const RecordDecl *, llvm::DIDescriptor);
+
+ /// createContextChain - Create a set of decls for the context chain.
+ llvm::DIDescriptor createContextChain(const Decl *Decl);
+
/// getCurrentDirname - Return current directory name.
StringRef getCurrentDirname();
@@ -249,9 +269,17 @@ private:
/// necessary.
llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+ /// getOrCreateLimitedType - Get the type from the cache or create a new
+ /// partial type if necessary.
+ llvm::DIType getOrCreateLimitedType(QualType Ty, llvm::DIFile F);
+
/// CreateTypeNode - Create type metadata for a source language type.
llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+ /// CreateLimitedTypeNode - Create type metadata for a source language
+ /// type, but only partial types for records.
+ llvm::DIType CreateLimitedTypeNode(QualType Ty, llvm::DIFile F);
+
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name, uint64_t *Offset);
@@ -274,7 +302,7 @@ private:
StringRef getSelectorName(Selector S);
/// getClassName - Get class name including template argument list.
- StringRef getClassName(RecordDecl *RD);
+ StringRef getClassName(const RecordDecl *RD);
/// getVTableName - Get vtable name for the given Class.
StringRef getVTableName(const CXXRecordDecl *Decl);
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index a6147ea7658b..8c154f070845 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -65,8 +65,6 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
- case Decl::ObjCClass:
- case Decl::ObjCForwardProtocol:
case Decl::FileScopeAsm:
case Decl::Friend:
case Decl::FriendTemplate:
@@ -84,6 +82,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
+ case Decl::Import:
// None of these decls require codegen support.
return;
@@ -122,7 +121,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
// uniqued. We can't do this in C, though, because there's no
// standard way to agree on which variables are the same (i.e.
// there's no mangling).
- if (getContext().getLangOptions().CPlusPlus)
+ if (getContext().getLangOpts().CPlusPlus)
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
@@ -142,7 +141,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
- if (CGF.getContext().getLangOptions().CPlusPlus) {
+ if (CGF.getContext().getLangOpts().CPlusPlus) {
StringRef Name = CGM.getMangledName(&D);
return Name.str();
}
@@ -177,7 +176,12 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
- std::string Name = GetStaticDeclName(*this, D, Separator);
+ // Use the label if the variable is renamed with the asm-label extension.
+ std::string Name;
+ if (D.hasAttr<AsmLabelAttr>())
+ Name = CGM.getMangledName(&D);
+ else
+ Name = GetStaticDeclName(*this, D, Separator);
llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
llvm::GlobalVariable *GV =
@@ -192,6 +196,14 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
return GV;
}
+/// hasNontrivialDestruction - Determine whether a type's destruction is
+/// non-trivial. If so, and the variable uses static initialization, we must
+/// register its destructor to run on exit.
+static bool hasNontrivialDestruction(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return RD && !RD->hasTrivialDestructor();
+}
+
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
@@ -199,19 +211,19 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV) {
- llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+ llvm::Constant *Init = CGM.EmitConstantInit(D, this);
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
- if (!getContext().getLangOptions().CPlusPlus)
+ if (!getContext().getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
- EmitCXXGuardedInit(D, GV);
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
}
return GV;
}
@@ -243,7 +255,16 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->eraseFromParent();
}
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true));
GV->setInitializer(Init);
+
+ if (hasNontrivialDestruction(D.getType())) {
+ // We have a constant initializer, but a nontrivial destructor. We still
+ // need to perform a guarded "initialization" in order to register the
+ // destructor.
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
+ }
+
return GV;
}
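hasNontrivialDestruction pairs with the EmitCXXGuardedInit(D, GV, /*PerformInit*/false) call above: when the initializer is evaluated at compile time but the type still has a destructor, the guard no longer re-runs initialization and exists solely to register the destructor once. Illustrative source, with a hypothetical type:

    struct Closer { int fd; ~Closer(); };  // trivial to initialize,
                                           // nontrivial to destroy
    void use() {
      static Closer c = { 2 };  // initializer emitted statically; the guarded
    }                           // path only registers ~Closer() for exit
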
@@ -252,11 +273,23 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
- llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);
+ // Check to see if we already have a global variable for this
+ // declaration. This can happen when double-emitting function
+ // bodies, e.g. with complete and base constructors.
+ llvm::Constant *addr =
+ CGM.getStaticLocalDeclAddress(&D);
+
+ llvm::GlobalVariable *var;
+ if (addr) {
+ var = cast<llvm::GlobalVariable>(addr->stripPointerCasts());
+ } else {
+ addr = var = CreateStaticVarDecl(D, ".", Linkage);
+ }
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- DMEntry = GV;
+ DMEntry = addr;
+ CGM.setStaticLocalDeclAddress(&D, addr);
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -264,40 +297,38 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
if (D.getType()->isVariablyModifiedType())
EmitVariablyModifiedType(D.getType());
- // Local static block variables must be treated as globals as they may be
- // referenced in their RHS initializer block-literal expresion.
- CGM.setStaticLocalDeclAddress(&D, GV);
+ // Save the type in case adding the initializer forces a type change.
+ llvm::Type *expectedType = addr->getType();
// If this value has an initializer, emit it.
if (D.getInit())
- GV = AddInitializerToStaticVarDecl(D, GV);
+ var = AddInitializerToStaticVarDecl(D, var);
- GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (D.hasAttr<AnnotateAttr>())
- CGM.AddGlobalAnnotations(&D, GV);
+ CGM.AddGlobalAnnotations(&D, var);
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
- GV->setSection(SA->getName());
+ var->setSection(SA->getName());
if (D.hasAttr<UsedAttr>())
- CGM.AddUsedGlobal(GV);
+ CGM.AddUsedGlobal(var);
// We may have to cast the constant because of the initializer
// mismatch above.
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- llvm::Type *LPtrTy =
- LTy->getPointerTo(CGM.getContext().getTargetAddressSpace(D.getType()));
- DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+ llvm::Constant *castedAddr = llvm::ConstantExpr::getBitCast(var, expectedType);
+ DMEntry = castedAddr;
+ CGM.setStaticLocalDeclAddress(&D, castedAddr);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(D.getLocation());
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ DI->EmitGlobalVariable(var, &D);
}
}
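The address cache consulted above matters because clang emits some function bodies twice: a constructor, for example, becomes both a complete-object (C1) and a base-object (C2) definition, and a static local inside it must resolve to one global in both emissions. Illustrative:

    struct S {
      S() { static int ctor_calls = 0; ++ctor_calls; }
      // The C1 and C2 constructors are separate LLVM functions, but both must
      // reference the single 'ctor_calls' global created on first emission.
    };
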
@@ -306,12 +337,12 @@ namespace {
DestroyObject(llvm::Value *addr, QualType type,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : addr(addr), type(type), destroyer(*destroyer),
+ : addr(addr), type(type), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
llvm::Value *addr;
QualType type;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -371,8 +402,8 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) {
// Compute the address of the local variable, in case it's a
// byref or something.
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
CGF.EmitExtendGCLifetime(value);
}
@@ -388,8 +419,8 @@ namespace {
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
@@ -426,7 +457,7 @@ static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
break;
case Qualifiers::OCL_Strong: {
- CodeGenFunction::Destroyer &destroyer =
+ CodeGenFunction::Destroyer *destroyer =
(var.hasAttr<ObjCPreciseLifetimeAttr>()
? CodeGenFunction::destroyARCStrongPrecise
: CodeGenFunction::destroyARCStrongImprecise);
@@ -490,15 +521,17 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
llvm::Value *value = EmitScalarExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(RValue::get(value), lvalue);
+ EmitStoreThroughLValue(RValue::get(value), lvalue, true);
return;
}
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- CodeGenFunction::RunCleanupsScope Scope(*this);
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init))
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
+ enterFullExpression(ewc);
init = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is
// zero-initialized. If the variable might be accessed in its
@@ -529,7 +562,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// Otherwise just do a simple store.
else
- EmitStoreOfScalar(zero, tempLV);
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
}
// Emit the initializer.
@@ -575,19 +608,19 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// both __weak and __strong, but __weak got filtered out above.
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
EmitARCRelease(oldValue, /*precise*/ false);
return;
}
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// EmitScalarInit - Initialize the given lvalue with the given object.
void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime)
- return EmitStoreThroughLValue(RValue::get(init), lvalue);
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
switch (lifetime) {
case Qualifiers::OCL_None:
@@ -611,7 +644,7 @@ void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
break;
}
- EmitStoreOfScalar(init, lvalue);
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
@@ -638,6 +671,16 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
}
return true;
}
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
// Anything else is hard and scary.
return false;
@@ -648,17 +691,26 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
bool isVolatile, CGBuilderTy &Builder) {
- // Zero doesn't require any stores.
- if (isa<llvm::ConstantAggregateZero>(Init) ||
- isa<llvm::ConstantPointerNull>(Init) ||
- isa<llvm::UndefValue>(Init))
+ // Zero doesn't require a store.
+ if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
return;
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- if (!Init->isNullValue())
- Builder.CreateStore(Init, Loc, isVolatile);
+ Builder.CreateStore(Init, Loc, isVolatile);
+ return;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
return;
}
@@ -667,9 +719,7 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
- if (Elt->isNullValue()) continue;
-
- // Otherwise, get a pointer to the element and emit it.
+ // Get a pointer to the element and emit it.
emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
isVolatile, Builder);
}
@@ -728,15 +778,17 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getContext().getLangOptions().ElideConstructors &&
+ bool NRVO = getContext().getLangOpts().ElideConstructors &&
D.isNRVOVariable();
// If this value is a POD array or struct with a statically
- // determinable constant initializer, there are optimizations we
- // can do.
- // TODO: we can potentially constant-evaluate non-POD structs and
- // arrays as long as the initialization is trivial (e.g. if they
- // have a non-trivial destructor, but not a non-trivial constructor).
+ // determinable constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
if (D.getInit() &&
(Ty->isArrayType() || Ty->isRecordType()) &&
(Ty.isPODType(getContext()) ||
@@ -744,9 +796,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
D.getInit()->isConstantInitializer(getContext(), false)) {
// If the variable's a const type, and it's neither an NRVO
- // candidate nor a __block variable, emit it as a global instead.
- if (CGM.getCodeGenOpts().MergeAllConstants && Ty.isConstQualified() &&
- !NRVO && !isByRef) {
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
emission.Address = 0; // signal this condition to later callbacks
@@ -788,7 +841,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getNameAsString());
+ Alloc->setName(D.getName());
CharUnits allocaAlignment = alignment;
if (isByRef)
@@ -960,21 +1013,22 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
- if (!emission.IsConstantAggregate) {
- LValue lv = MakeAddrLValue(Loc, type, alignment.getQuantity());
+ llvm::Constant *constant = 0;
+ if (emission.IsConstantAggregate) {
+ assert(!capturedByInit && "constant init contains a capturing block?");
+ constant = CGM.EmitConstantInit(D, this);
+ }
+
+ if (!constant) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
- assert(!capturedByInit && "constant init contains a capturing block?");
-
bool isVolatile = type.isVolatileQualified();
- llvm::Constant *constant = CGM.EmitConstantExpr(D.getInit(), type, this);
- assert(constant != 0 && "Wasn't a simple constant init?");
-
llvm::Value *SizeVal =
llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
@@ -1035,7 +1089,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
RValue rvalue = EmitReferenceBindingToExpr(init, D);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(rvalue, lvalue);
+ EmitStoreThroughLValue(rvalue, lvalue, true);
} else if (!hasAggregateLLVMType(type)) {
EmitScalarInit(init, D, lvalue, capturedByInit);
} else if (type->isAnyComplexType()) {
@@ -1049,6 +1103,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
+ MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
}
}
@@ -1101,7 +1156,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
}
// If we haven't chosen a more specific destroyer, use the default.
- if (!destroyer) destroyer = &getDestroyer(dtorKind);
+ if (!destroyer) destroyer = getDestroyer(dtorKind);
// Use an EH cleanup in array destructors iff the destructor itself
// is being pushed as an EH cleanup.
@@ -1123,7 +1178,7 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
emitAutoVarTypeCleanup(emission, dtorKind);
// In GC mode, honor objc_precise_lifetime.
- if (getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
D.hasAttr<ObjCPreciseLifetimeAttr>()) {
EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
@@ -1135,7 +1190,7 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
llvm::Constant *F = CGM.GetAddrOfFunction(FD);
assert(F && "Could not find function!");
- const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
}
@@ -1145,25 +1200,18 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
enterByrefCleanup(emission);
}
-CodeGenFunction::Destroyer &
+CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
- // This is surprisingly compiler-dependent. GCC 4.2 can't bind
- // references to functions directly in returns, and using '*&foo'
- // confuses MSVC. Luckily, the following code pattern works in both.
- Destroyer *destroyer = 0;
switch (kind) {
case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
case QualType::DK_cxx_destructor:
- destroyer = &destroyCXXObject;
- break;
+ return destroyCXXObject;
case QualType::DK_objc_strong_lifetime:
- destroyer = &destroyARCStrongPrecise;
- break;
+ return destroyARCStrongPrecise;
case QualType::DK_objc_weak_lifetime:
- destroyer = &destroyARCWeak;
- break;
+ return destroyARCWeak;
}
- return *destroyer;
+ llvm_unreachable("Unknown DestructionKind");
}
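With Destroyer now an ordinary function type, getDestroyer can return the function's address directly and callers invoke it through the pointer, replacing the reference-binding workaround the deleted comment described. A hedged sketch of the call pattern, assuming the CodeGenFunction typedef and surrounding members:

    // typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr,
    //                        QualType ty);  // as declared in CodeGenFunction.h
    Destroyer *destroyer = getDestroyer(QualType::DK_cxx_destructor);
    destroyer(*this, addr, type);  // plain call through the pointer
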
/// pushDestroy - Push the standard destructor for the given type.
@@ -1177,7 +1225,7 @@ void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
}
void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
- QualType type, Destroyer &destroyer,
+ QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
destroyer, useEHCleanupForArray);
@@ -1195,7 +1243,7 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
/// used when destroying array elements, in case one of the
/// destructions throws an exception
void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray) {
const ArrayType *arrayType = getContext().getAsArrayType(type);
if (!arrayType)
@@ -1232,7 +1280,7 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
llvm::Value *end,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool checkZeroLength,
bool useEHCleanup) {
assert(!type->isArrayType());
@@ -1283,7 +1331,7 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type,
- CodeGenFunction::Destroyer &destroyer) {
+ CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
@@ -1316,13 +1364,13 @@ namespace {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEnd;
QualType ElementType;
- CodeGenFunction::Destroyer &Destroyer;
+ CodeGenFunction::Destroyer *Destroyer;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
- ElementType(elementType), Destroyer(*destroyer) {}
+ ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
@@ -1337,14 +1385,14 @@ namespace {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEndPointer;
QualType ElementType;
- CodeGenFunction::Destroyer &Destroyer;
+ CodeGenFunction::Destroyer *Destroyer;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
- ElementType(elementType), Destroyer(*destroyer) {}
+ ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
@@ -1367,10 +1415,10 @@ namespace {
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
- Destroyer &destroyer) {
+ Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
- elementType, &destroyer);
+ elementType, destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
@@ -1386,10 +1434,10 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
- Destroyer &destroyer) {
+ Destroyer *destroyer) {
pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEnd,
- elementType, &destroyer);
+ elementType, destroyer);
}
namespace {
@@ -1442,7 +1490,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DeclPtr = Arg;
} else {
// Otherwise, create a temporary to hold the value.
- DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+ D.getName() + ".addr");
+ Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ DeclPtr = Alloc;
bool doStore = true;
@@ -1480,7 +1531,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
if (lt == Qualifiers::OCL_Weak) {
EmitARCInitWeak(DeclPtr, Arg);
- doStore = false; // The weak init is a store, no need to do two
+ doStore = false; // The weak init is a store, no need to do two.
}
}
@@ -1491,8 +1542,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Store the initial value into the alloca.
if (doStore) {
LValue lv = MakeAddrLValue(DeclPtr, Ty,
- getContext().getDeclAlign(&D).getQuantity());
- EmitStoreOfScalar(Arg, lv);
+ getContext().getDeclAlign(&D));
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
}
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 3b8f830278b2..10f0b83e4053 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -28,7 +28,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
ASTContext &Context = CGF.getContext();
- unsigned alignment = Context.getDeclAlign(&D).getQuantity();
+ CharUnits alignment = Context.getDeclAlign(&D);
QualType type = D.getType();
LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
@@ -101,60 +101,159 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
CGF.EmitCXXGlobalDtorRegistration(function, argument);
}
+/// Emit code to cause the variable at the given address to be considered
+/// constant from this point onwards.
+static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Addr) {
+ // Don't emit the intrinsic if we're not optimizing.
+ if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // Grab the llvm.invariant.start intrinsic.
+ llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+
+ // Emit a call with the size in bytes of the object.
+ CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
+ uint64_t Width = WidthChars.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
+ CGF.Builder.CreateCall(InvariantStart, Args);
+}
+
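EmitDeclInvariant tells the optimizer that a constant-qualified global, once dynamically initialized, never changes again. Illustrative source, with roughly the IR the call above produces shown in the comment (size hypothetical):

    int make();
    const int x = make();  // needs dynamic init, but is immutable afterwards
    // emitted after the initializing store, at -O1 and above:
    //   call {}* @llvm.invariant.start(i64 4, i8* bitcast (i32* @x to i8*))
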
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ llvm::Constant *DeclPtr,
+ bool PerformInit) {
const Expr *Init = D.getInit();
QualType T = D.getType();
if (!T->isReferenceType()) {
- EmitDeclInit(*this, D, DeclPtr);
- EmitDeclDestroy(*this, D, DeclPtr);
+ if (PerformInit)
+ EmitDeclInit(*this, D, DeclPtr);
+ if (CGM.isTypeConstant(D.getType(), true))
+ EmitDeclInvariant(*this, D, DeclPtr);
+ else
+ EmitDeclDestroy(*this, D, DeclPtr);
return;
}
+ assert(PerformInit && "cannot have constant initializer which needs "
+ "destruction for reference");
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init, &D);
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-void
-CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
- llvm::Constant *DeclPtr) {
- // Generate a global destructor entry if not using __cxa_atexit.
- if (!CGM.getCodeGenOpts().CXAAtExit) {
- CGM.AddCXXDtorEntry(DtorFn, DeclPtr);
- return;
- }
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args);
+}
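For reference, the Itanium ABI routine being called: __cxa_atexit records the destructor, the object address, and the DSO handle, so registered destructors can run when a shared library is unloaded rather than only at process exit. Expressed at the source level (hedged; the real call is built in IR):

    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
    extern "C" void *__dso_handle;  // one handle per shared object

    struct T { ~T(); };
    static T obj;
    // registration emitted after obj's initialization, roughly:
    //   __cxa_atexit(&destructor_of_T, &obj, &__dso_handle);
    // where &destructor_of_T is the real dtor bitcast to void (*)(void *)
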
- // Get the destructor function type
- llvm::Type *DtorFnTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- Int8PtrTy, false);
- DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *ty,
+ const Twine &name);
+
+/// Create a stub function, suitable for being passed to atexit,
+/// which passes the given address to the given destructor function.
+static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+  // Get the function type for the stub, void ().
+ llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, ty,
+ Twine("__dtor_", addr->getName()));
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
+ CGM.getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *dtorFn =
+ dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ call->setCallingConv(dtorFn->getCallingConv());
+
+ CGF.FinishFunction();
- llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
+ return fn;
+}
- // Get the __cxa_atexit function type
- // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
- llvm::FunctionType *AtExitFnTy =
- llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+/// Register a global destructor using atexit.
+static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
- llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
- "__cxa_atexit");
- if (llvm::Function *Fn = dyn_cast<llvm::Function>(AtExitFn))
- Fn->setDoesNotThrow();
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ atexitFn->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(atexit, dtorStub);
+}
+
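Plain atexit only accepts void(*)(void), so createAtExitStub manufactures the closure that __cxa_atexit made unnecessary: a one-line function with the object address baked in. In source terms, approximately:

    extern "C" int atexit(void (*)(void));

    struct T { ~T(); };
    static T obj;
    static void __dtor_obj() { obj.~T(); }  // the generated stub; named
                                            // "__dtor_" + the global's name
    // emitted after obj is constructed:  atexit(&__dtor_obj);
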
+void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
+ return;
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ CGM.AddCXXDtorEntry(dtor, addr);
+ return;
+ }
- llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
- "__dso_handle");
- llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
- llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
- Builder.CreateCall(AtExitFn, Args);
+ // Otherwise, we just use atexit.
+ emitGlobalDtorWithAtExit(*this, dtor, addr);
}
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
- llvm::GlobalVariable *DeclPtr) {
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
// If we've been asked to forbid guard variables, emit an error now.
// This diagnostic is hard-coded for Darwin's use case; we can find
// better phrasing if someone else needs it.
@@ -163,24 +262,24 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
"this initialization requires a guard variable, which "
"the kernel does not support");
- CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr);
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *FTy,
- StringRef Name) {
+ const Twine &Name) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
- if (!CGM.getContext().getLangOptions().AppleKext) {
+ if (!CGM.getContext().getLangOpts().AppleKext) {
// Set the section if needed.
if (const char *Section =
CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
return Fn;
@@ -188,16 +287,16 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
- llvm::GlobalVariable *Addr) {
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create a variable initialization function.
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
- CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr);
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
+ PerformInit);
if (D->hasAttr<InitPriorityAttr>()) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
@@ -226,9 +325,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global initialization function.
llvm::Function *Fn =
@@ -260,24 +357,23 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
- CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
+ CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
}
/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
- llvm::GlobalVariable *Addr) {
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
// Use guarded initialization if the global variable is weak. This
@@ -285,9 +381,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
// definitions explicitly marked weak.
if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
- EmitCXXGuardedInit(*D, Addr);
+ EmitCXXGuardedInit(*D, Addr, PerformInit);
} else {
- EmitCXXGlobalVarDeclInit(*D, Addr);
+ EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
}
FinishFunction();
@@ -297,14 +393,14 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Constant **Decls,
unsigned NumDecls) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
RunCleanupsScope Scope(*this);
// When building in Objective-C++ ARC mode, create an autorelease pool
// around the global initializers.
- if (getLangOptions().ObjCAutoRefCount && getLangOptions().CPlusPlus) {
+ if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EmitObjCAutoreleasePoolCleanup(token);
}
@@ -318,11 +414,11 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
FinishFunction();
}
-void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
// Emit the dtors, in reverse order from construction.
@@ -343,16 +439,17 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
llvm::Function *
CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray) {
FunctionArgList args;
ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
args.push_back(&dst);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
- FunctionType::ExtInfo());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ CGM.getTypes().arrangeFunctionDeclaration(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
@@ -365,4 +462,3 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
return fn;
}
-
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 5e4fb9881937..95e0030866d7 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -11,17 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/StmtCXX.h"
-
-#include "llvm/Intrinsics.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
-
-#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
#include "TargetInfo.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
using namespace clang;
using namespace CodeGen;
@@ -104,7 +100,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
- if (CGM.getLangOptions().SjLjExceptions)
+ if (CGM.getLangOpts().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume");
}
@@ -113,7 +109,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
- if (CGM.getLangOptions().SjLjExceptions)
+ if (CGM.getLangOpts().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
}
@@ -127,9 +123,9 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
StringRef name;
// In C++, use std::terminate().
- if (CGF.getLangOptions().CPlusPlus)
+ if (CGF.getLangOpts().CPlusPlus)
name = "_ZSt9terminatev"; // FIXME: mangling!
- else if (CGF.getLangOptions().ObjC1 &&
+ else if (CGF.getLangOpts().ObjC1 &&
CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
name = "objc_terminate";
else
@@ -145,14 +141,37 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
-const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
-const EHPersonality EHPersonality::GNU_C_SJLJ("__gcc_personality_sj0");
-const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
-const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
-const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
-const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
- "objc_exception_throw");
-const EHPersonality EHPersonality::GNU_ObjCXX("__gnustep_objcxx_personality_v0");
+namespace {
+ /// The exceptions personality for a function.
+ struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ };
+}
+
+const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", 0 };
+const EHPersonality EHPersonality::NeXT_ObjC = { "__objc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", 0};
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", 0 };
+const EHPersonality
+EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
+const EHPersonality
+EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", 0 };
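Rewriting EHPersonality as an aggregate initialized with braces lets these tables be emitted as constant data with no global constructors, a property a compiler's own code cares about. The pattern in miniature:

    struct Personality { const char *Fn; const char *Rethrow; };
    // brace-initialized with constants: no __cxx_global_var_init function,
    // no static initialization order concerns
    const Personality GNU_C = { "__gcc_personality_v0", 0 };
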
static const EHPersonality &getCPersonality(const LangOptions &L) {
if (L.SjLjExceptions)
@@ -211,10 +230,8 @@ const EHPersonality &EHPersonality::get(const LangOptions &L) {
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getInt32Ty(CGM.getLLVMContext()),
- true),
- Personality.getPersonalityFnName());
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn);
return Fn;
}
@@ -283,17 +300,18 @@ void CodeGenModule::SimplifyPersonality() {
return;
// If we're not in ObjC++ -fexceptions, there's nothing to do.
- if (!Features.CPlusPlus || !Features.ObjC1 || !Features.Exceptions)
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
return;
- const EHPersonality &ObjCXX = EHPersonality::get(Features);
- const EHPersonality &CXX = getCXXPersonality(Features);
- if (&ObjCXX == &CXX ||
- ObjCXX.getPersonalityFnName() == CXX.getPersonalityFnName())
+ const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
+ const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ if (&ObjCXX == &CXX)
return;
- llvm::Function *Fn =
- getModule().getFunction(ObjCXX.getPersonalityFnName());
+ assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
+ "Different EHPersonalities using the same personality function.");
+
+ llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
// Nothing to do if it's unused.
if (!Fn || Fn->use_empty()) return;
@@ -359,7 +377,7 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
/*IsInit*/ true);
// Deactivate the cleanup block.
- CGF.DeactivateCleanupBlock(cleanup);
+ CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
@@ -452,7 +470,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().CXXExceptions)
+ if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -520,7 +538,7 @@ static void emitFilterDispatchBlock(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().CXXExceptions)
+ if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -636,15 +654,14 @@ static bool isNonEHScope(const EHScope &S) {
return false;
}
- // Suppress warning.
- return false;
+ llvm_unreachable("Invalid EHScope Kind!");
}
llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
return 0;
// Check the innermost scope for a cached landing pad. If this is
@@ -734,7 +751,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
- const EHPersonality &personality = EHPersonality::get(getLangOptions());
+ const EHPersonality &personality = EHPersonality::get(getLangOpts());
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
@@ -984,8 +1001,23 @@ static void InitCatchParam(CodeGenFunction &CGF,
if (CatchType->hasPointerRepresentation()) {
llvm::Value *CastExn =
CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
- CGF.Builder.CreateStore(CastExn, ParamAddr);
- return;
+
+ switch (CatchType.getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(ParamAddr, CastExn);
+ return;
+ }
+ llvm_unreachable("bad ownership qualifier!");
}
// Otherwise, it returns a pointer into the exception object.
@@ -1039,10 +1071,12 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
+ CGF.EmitAggExpr(copyExpr,
+ AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -1170,14 +1204,10 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
if (nextIsEnd) {
CGF.Builder.restoreIP(savedIP);
return;
-
- // Otherwise we need to emit and continue at that block.
- } else {
- CGF.EmitBlock(nextBlock);
}
+ // Otherwise we need to emit and continue at that block.
+ CGF.EmitBlock(nextBlock);
}
-
- llvm_unreachable("fell out of loop!");
}
void CodeGenFunction::popCatchScope() {
@@ -1464,7 +1494,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
llvm::LandingPadInst *LPadInst =
Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
getOpaquePersonalityFn(CGM, Personality), 0);
@@ -1511,24 +1541,23 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
EHResumeBlock = createBasicBlock("eh.resume");
Builder.SetInsertPoint(EHResumeBlock);
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
- StringRef RethrowName = Personality.getCatchallRethrowFnName();
- if (!RethrowName.empty()) {
+ const char *RethrowName = Personality.CatchallRethrowFn;
+ if (RethrowName != 0) {
Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
getExceptionFromSlot())
->setDoesNotReturn();
} else {
- llvm::Value *Exn = getExceptionFromSlot();
-
switch (CleanupHackLevel) {
case CHL_MandatoryCatchall:
// In mandatory-catchall mode, we need to use
// _Unwind_Resume_or_Rethrow, or whatever the personality's
// equivalent is.
- Builder.CreateCall(getUnwindResumeOrRethrowFn(), Exn)
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ getExceptionFromSlot())
->setDoesNotReturn();
break;
case CHL_MandatoryCleanup: {
@@ -1552,7 +1581,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
// In an idealized mode where we don't have to worry about the
// optimizer combining landing pads, we should just use
// _Unwind_Resume (or the personality's equivalent).
- Builder.CreateCall(getUnwindResumeFn(), Exn)
+ Builder.CreateCall(getUnwindResumeFn(), getExceptionFromSlot())
->setDoesNotReturn();
break;
}
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
deleted file mode 100644
index d0216160d50f..000000000000
--- a/lib/CodeGen/CGException.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These classes support the generation of LLVM IR for exceptions in
-// C++ and Objective C.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_CGEXCEPTION_H
-#define CLANG_CODEGEN_CGEXCEPTION_H
-
-#include "llvm/ADT/StringRef.h"
-
-namespace clang {
-class LangOptions;
-
-namespace CodeGen {
-
-/// The exceptions personality for a function. When
-class EHPersonality {
- StringRef PersonalityFn;
-
- // If this is non-null, this personality requires a non-standard
- // function for rethrowing an exception after a catchall cleanup.
- // This function must have prototype void(void*).
- StringRef CatchallRethrowFn;
-
- EHPersonality(StringRef PersonalityFn,
- StringRef CatchallRethrowFn = StringRef())
- : PersonalityFn(PersonalityFn),
- CatchallRethrowFn(CatchallRethrowFn) {}
-
-public:
- static const EHPersonality &get(const LangOptions &Lang);
- static const EHPersonality GNU_C;
- static const EHPersonality GNU_C_SJLJ;
- static const EHPersonality GNU_ObjC;
- static const EHPersonality GNU_ObjCXX;
- static const EHPersonality NeXT_ObjC;
- static const EHPersonality GNU_CPlusPlus;
- static const EHPersonality GNU_CPlusPlus_SJLJ;
-
- StringRef getPersonalityFnName() const { return PersonalityFn; }
- StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
-};
-
-}
-}
-
-#endif
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index bd4e553991b3..08970fd73865 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -134,14 +135,16 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
Qualifiers Quals,
bool IsInit) {
- if (E->getType()->isAnyComplexType())
+ // FIXME: This function should take an LValue as an argument.
+ if (E->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
- else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+ } else if (hasAggregateLLVMType(E->getType())) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit)));
- else {
+ } else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
EmitStoreThroughLValue(RV, LV);
@@ -182,7 +185,7 @@ CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
Out.flush();
@@ -209,13 +212,20 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
+
// Look through expressions for materialized temporaries (for now).
if (const MaterializeTemporaryExpr *M
= dyn_cast<MaterializeTemporaryExpr>(E)) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
- if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
+ if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
E->getType()->isObjCLifetimeType() &&
(E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
@@ -228,29 +238,21 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
- if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ CGF.enterFullExpression(EWC);
CodeGenFunction::RunCleanupsScope Scope(CGF);
- return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
ObjCARCReferenceLifetimeType,
InitializedDecl);
}
- if (const ObjCPropertyRefExpr *PRE =
- dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
- if (PRE->getGetterResultType()->isReferenceType())
- E = PRE;
-
RValue RV;
if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
- if (LV.isPropertyRef()) {
- RV = CGF.EmitLoadOfPropertyRefLValue(LV);
- return RV.getScalarVal();
- }
if (LV.isSimple())
return LV.getAddress();
@@ -358,10 +360,11 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
AggValueSlot::IsDestructed_t isDestructed
= AggValueSlot::IsDestructed_t(InitializedDecl != 0);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
- isDestructed,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
@@ -484,21 +487,17 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
case Qualifiers::OCL_Strong: {
bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind cleanupKind = getARCCleanupKind();
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
- &destroyARCStrongImprecise;
pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
- *destroyer, cleanupKind & EHCleanup);
+ precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
break;
}
case Qualifiers::OCL_Weak: {
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = &destroyARCWeak;
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
- ObjCARCReferenceLifetimeType, *destroyer, true);
+ ObjCARCReferenceLifetimeType, destroyARCWeak, true);
break;
}
}
@@ -512,10 +511,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
const llvm::Constant *Elts) {
- if (isa<llvm::ConstantAggregateZero>(Elts))
- return 0;
-
- return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
}
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
@@ -643,6 +640,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
case Expr::ObjCSelectorExprClass:
return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
@@ -656,6 +656,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
@@ -671,17 +672,28 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitStringLiteralLValue(cast<StringLiteral>(E));
case Expr::ObjCEncodeExprClass:
return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
-
- case Expr::BlockDeclRefExprClass:
- return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only single-element init list can be lvalue.");
+ return EmitLValue(cast<InitListExpr>(E)->getInit(0));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
- case Expr::ExprWithCleanupsClass:
- return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
case Expr::CXXScalarValueInitExprClass:
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
@@ -693,8 +705,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
case Expr::ObjCIvarRefExprClass:
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
- case Expr::ObjCPropertyRefExprClass:
- return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
@@ -726,16 +736,188 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
-
+
case Expr::MaterializeTemporaryExprClass:
return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
}
}
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const RecordType *RT = dyn_cast<RecordType>(type))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
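A few concrete declarations make the classification easier to follow; the comments below sketch what checkVarTypeForConstantEmission would return for each (variable names are illustrative):

const int n = 3;           // CEK_AsValueOnly: const, non-volatile object type
const volatile int cv = 3; // CEK_None: volatile disqualifies the object
const int &r = n;          // CEK_AsValueOrReference: reference to an emittable object
int k = 0;
int &m = k;                // CEK_AsReferenceOnly: the referee itself is not constant-emittable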
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+ // This should probably fire even for
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
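As a sketch of the capture case the comment above describes: a const integer named in a lambda with an empty capture list is never captured, so this constant path is the only way to produce its value (names are illustrative):

const int bufSize = 64;
auto f = [] { return bufSize; }; // empty capture list: tryEmitAsConstant materializes 64 directly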
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(),
+ lvalue.getType(), lvalue.getTBAAInfo());
+}
+
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
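The predicate above accepts three shapes, sketched below (the enum uses C++11's fixed underlying type; _Atomic is shown in its C11 spelling):

bool b = false;               // trivially boolean
enum Flag : bool { No, Yes }; // enum whose integer type is bool
// _Atomic(bool) ab;          // C11: atomic whose value type is boolean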
+
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
+ CGM.getCodeGenOpts().StrictEnums &&
+ !ET->getDecl()->isFixed());
+ bool IsBool = hasBooleanRepresentation(Ty);
+ llvm::Type *LTy;
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return NULL;
+
+ llvm::APInt Min;
+ llvm::APInt End;
+ if (IsBool) {
+ Min = llvm::APInt(8, 0);
+ End = llvm::APInt(8, 2);
+ LTy = Int8Ty;
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ LTy = ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+
+ if (End == Min)
+ return NULL;
+
+ llvm::Value *LowAndHigh[2];
+ LowAndHigh[0] = llvm::ConstantInt::get(LTy, Min);
+ LowAndHigh[1] = llvm::ConstantInt::get(LTy, End);
+
+ llvm::LLVMContext &C = getLLVMContext();
+ llvm::MDNode *Range = llvm::MDNode::get(C, LowAndHigh);
+ return Range;
}
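Two worked examples of the range computation above, assuming -fstrict-enums for the enum cases:

enum Color { Red, Green, Blue };  // NumPositiveBits == 2, NumNegativeBits == 0
// => Min = 0, End = 1 << 2 = 4: loads of Color get !range [0, 4)
enum Delta { Down = -2, Up = 1 }; // NumNegativeBits == 2, NumPositiveBits == 1
// => NumBits = max(2, 1 + 1) = 2, End = 1 << (2 - 1) = 2, Min = -2: !range [-2, 2)
// bool loads always get !range [0, 2) on i8, independent of -fstrict-enums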
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
@@ -748,19 +930,20 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
Load->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
- return EmitFromMemory(Load, Ty);
-}
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
-static bool isBooleanUnderlyingType(QualType Ty) {
- if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return ET->getDecl()->getIntegerType()->isBooleanType();
- return false;
+ return EmitFromMemory(Load, Ty);
}
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
// This should really always be an i1, but sometimes it's already
// an i8, and it's awkward to track those cases down.
if (Value->getType()->isIntegerTy(1))
@@ -773,7 +956,7 @@ llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
@@ -784,7 +967,8 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
QualType Ty,
- llvm::MDNode *TBAAInfo) {
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
@@ -792,12 +976,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
Store->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getTBAAInfo(), isInit);
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -821,9 +1008,10 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
}
if (LV.isVectorElt()) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
- LV.isVolatileQualified());
- return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
@@ -832,11 +1020,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isExtVectorElt())
return EmitLoadOfExtVectorElementLValue(LV);
- if (LV.isBitField())
- return EmitLoadOfBitfieldLValue(LV);
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return EmitLoadOfPropertyRefLValue(LV);
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
@@ -867,8 +1052,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
}
// Cast to the access type.
- llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
- AI.AccessWidth,
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -914,8 +1098,10 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -932,10 +1118,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
unsigned NumResultElts = ExprVT->getNumElements();
SmallVector<llvm::Constant*, 4> Mask;
- for (unsigned i = 0; i != NumResultElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
- }
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
@@ -948,15 +1132,19 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
- Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
return;
}
@@ -965,11 +1153,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (Dst.isExtVectorElt())
return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
- if (Dst.isBitField())
- return EmitStoreThroughBitfieldLValue(Src, Dst);
-
- assert(Dst.isPropertyRef() && "Unknown LValue type");
- return EmitStoreThroughPropertyRefLValue(Src, Dst);
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
}
// There's special magic for assigning into an ARC-qualified l-value.
@@ -1031,7 +1216,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst);
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -1045,7 +1230,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- if (Dst.getType()->isBooleanType())
+ if (hasBooleanRepresentation(Dst.getType()))
SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
@@ -1143,8 +1328,10 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1158,10 +1345,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// elements and restore the vector mask since it is on the side it will be
// stored.
SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
@@ -1173,11 +1358,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
SmallVector<llvm::Constant*, 4> ExtMask;
- unsigned i;
- for (i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
- for (; i != NumDstElts; ++i)
- ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
@@ -1186,13 +1369,11 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// build identity
SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+ Mask.push_back(Builder.getInt32(i));
// modify which elements get shuffled in
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned Idx = getAccessedFieldNo(i, Elts);
- Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
} else {
@@ -1206,7 +1387,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
- Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
}
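The read/modify/write above corresponds to a swizzled vector store at the source level; a sketch using Clang's ext_vector_type extension:

typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));
void set_xy(float4 *v, float2 w) {
  v->xy = w; // load *v, shuffle w's lanes into elements 0 and 1, store the whole vector back
}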
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
@@ -1215,7 +1398,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LValue &LV,
bool IsMemberAccess=false) {
- if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
return;
if (isa<ObjCIvarRefExpr>(E)) {
@@ -1323,14 +1506,19 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
"Var decl must have external storage or be a file var decl!");
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
- if (VD->getType()->isReferenceType())
- V = CGF.Builder.CreateLoad(V);
-
- V = EmitBitCastOfLValueToProperType(CGF, V,
- CGF.getTypes().ConvertTypeForMem(E->getType()));
-
- unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
- LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ QualType T = E->getType();
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ }
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
}
@@ -1350,13 +1538,21 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
}
}
- unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(ND);
+ QualType T = E->getType();
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl *VD = cast<ValueDecl>(ND);
@@ -1365,30 +1561,46 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
-
// Check if this is a global variable.
if (VD->hasExternalStorage() || VD->isFileVarDecl())
return EmitGlobalVarDeclLValue(*this, E, VD);
+ bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+
bool NonGCable = VD->hasLocalStorage() &&
!VD->getType()->isReferenceType() &&
- !VD->hasAttr<BlocksAttr>();
+ !isBlockVariable;
llvm::Value *V = LocalDeclMap[VD];
if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
+
+ // Use special handling for lambdas.
+ if (!V) {
+ if (FieldDecl *FD = LambdaCaptureFields.lookup(VD))
+ return EmitLValueForField(CXXABIThisValue, FD, 0);
+
+ assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
+ CharUnits alignment = getContext().getDeclAlign(VD);
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
+ E->getType(), alignment);
+ }
+
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (VD->hasAttr<BlocksAttr>())
+ if (isBlockVariable)
V = BuildBlockByrefAddress(V, VD);
-
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V);
- V = EmitBitCastOfLValueToProperType(*this, V,
- getTypes().ConvertTypeForMem(E->getType()));
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = MakeAddrLValue(V, T, Alignment);
+ }
- LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
@@ -1401,16 +1613,6 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return EmitFunctionDeclLValue(*this, E, fn);
llvm_unreachable("Unhandled DeclRefExpr");
-
- // an invalid LValue, but the assert will
- // ensure that this point is never reached.
- return LValue();
-}
-
-LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
- unsigned Alignment =
- getContext().getDeclAlign(E->getDecl()).getQuantity();
- return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
@@ -1425,15 +1627,15 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate a __weak write barrier on an indirect reference
// to a pointer to object, as in: void foo (__weak id *param); *param = 0;
// But we continue to generate a __strong write barrier on an indirect write
// into a pointer to object.
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1444,9 +1646,10 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
llvm::Value *Addr = LV.getAddress();
- // real and imag are valid on scalars. This is a faster way of
- // testing that.
- if (!cast<llvm::PointerType>(Addr->getType())
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !cast<llvm::PointerType>(Addr->getType())
->getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
@@ -1588,7 +1791,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType());
+ E->getBase()->getType(), LHS.getAlignment());
}
// Extend or truncate the index type to 32 or 64-bits.
@@ -1617,7 +1820,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
- unsigned ArrayAlignment = 0;
+ CharUnits ArrayAlignment;
if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
@@ -1632,7 +1835,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- if (getLangOptions().isSignedOverflowDefined()) {
+ if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
} else {
@@ -1667,14 +1870,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
@@ -1684,17 +1887,21 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
// Limit the alignment to that of the result type.
- if (ArrayAlignment) {
- unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
+ LValue LV;
+ if (!ArrayAlignment.isZero()) {
+ CharUnits Align = getContext().getTypeAlignInChars(T);
ArrayAlignment = std::min(Align, ArrayAlignment);
+ LV = MakeAddrLValue(Address, T, ArrayAlignment);
+ } else {
+ LV = MakeNaturalAlignAddrLValue(Address, T);
}
- LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC) {
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -1702,13 +1909,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
}
static
-llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
SmallVector<unsigned, 4> &Elts) {
SmallVector<llvm::Constant*, 4> CElts;
-
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
+ CElts.push_back(Builder.getInt32(Elts[i]));
return llvm::ConstantVector::get(CElts);
}
@@ -1751,22 +1956,20 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
+ llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignment());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
llvm::Constant *BaseElts = Base.getExtVectorElts();
SmallVector<llvm::Constant *, 4> CElts;
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- if (isa<llvm::ConstantAggregateZero>(BaseElts))
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
- else
- CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
- }
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
+ Base.getAlignment());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -1847,6 +2050,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
+ CharUnits alignment = getContext().getDeclAlign(field);
bool mayAlias = rec->hasAttr<MayAliasAttr>();
@@ -1863,6 +2067,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+ load->setAlignment(alignment.getQuantity());
if (CGM.shouldUseTBAA()) {
llvm::MDNode *tbaa;
@@ -1876,6 +2081,10 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
addr = load;
mayAlias = false;
type = refType->getPointeeType();
+ if (type->isIncompleteType())
+ alignment = CharUnits();
+ else
+ alignment = getContext().getTypeAlignInChars(type);
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
@@ -1891,7 +2100,6 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
- unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
LV.getQuals().addCVRQualifiers(cvr);
@@ -1932,11 +2140,16 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
- unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(Field);
return MakeAddrLValue(V, FieldType, Alignment);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType());
+ }
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -1957,6 +2170,8 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitAggExprToLValue(expr);
}
+ OpaqueValueMapping binding(*this, expr);
+
const Expr *condExpr = expr->getCond();
bool CondExprBool;
if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
@@ -1967,8 +2182,6 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitLValue(live);
}
- OpaqueValueMapping binding(*this, expr);
-
llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
@@ -2020,21 +2233,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
-
- case CK_GetObjCProperty: {
- LValue LV = EmitLValue(E->getSubExpr());
- assert(LV.isPropertyRef());
- RValue RV = EmitLoadOfPropertyRefLValue(LV);
-
- // Property is an aggregate r-value.
- if (RV.isAggregate()) {
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
- }
-
- // Implicit property returns an l-value.
- assert(RV.isScalar());
- return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
- }
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValue:
@@ -2071,11 +2274,13 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_AnyPointerToBlockPointerCast:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
- case CK_ARCExtendBlockObject: {
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
@@ -2163,7 +2368,7 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
}
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
- assert(e->isGLValue() || e->getType()->isRecordType());
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
return getOpaqueLValueMapping(e);
}
@@ -2206,7 +2411,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
DestroyedType->isObjCLifetimeType() &&
(DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
@@ -2342,7 +2547,14 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
- EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
return MakeAddrLValue(Slot.getAddr(), E->getType());
}
@@ -2427,7 +2639,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FnType);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -2446,11 +2659,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
- if (isa<FunctionNoProtoType>(FnType) &&
- !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
- assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
- ->isVarArg());
- llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
+ if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
}
@@ -2481,7 +2691,17 @@ static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
- if (E->isCmpXChg()) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
// Note that cmpxchg only supports specifying one ordering and
// doesn't support weak cmpxchg, at least at the moment.
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -2498,7 +2718,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Load) {
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
Load->setAlignment(Size);
@@ -2508,7 +2730,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Store) {
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
assert(!Dest && "Store does not return a value");
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
@@ -2519,25 +2743,74 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- case AtomicExpr::CmpXchgStrong:
- case AtomicExpr::Store:
- case AtomicExpr::Load: assert(0 && "Already handled!");
- case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
- case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
- case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
- case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
- case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
- case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
}
+
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
llvm::AtomicRMWInst *RMWI =
CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
StoreDest->setAlignment(Align);
}
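How the PostOp re-application plays out for the GNU builtins, as a sketch:

int fetch_then_add(int *p) {
  return __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST); // atomicrmw add; result is the old value
}
int add_then_fetch(int *p) {
  return __atomic_add_fetch(p, 1, __ATOMIC_SEQ_CST); // same atomicrmw, then adds 1 again to yield the new value
}
int nand_then_fetch(int *p, int m) {
  return __atomic_nand_fetch(p, m, __ATOMIC_SEQ_CST); // atomicrmw nand, then recomputes ~(old & m)
}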
@@ -2562,7 +2835,9 @@ static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
- QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
@@ -2571,84 +2846,202 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().getTargetInfo().getMaxAtomicInlineWidth();
bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ assert(!Dest && "Init does not return a value");
+ if (!hasAggregateLLVMType(E->getVal1()->getType())) {
+ QualType PointeeType
+ = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
+ EmitScalarInit(EmitScalarExpr(E->getVal1()),
+ LValue::MakeAddr(Ptr, PointeeType, alignChars,
+ getContext()));
+ } else if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
+ AtomicTy.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(E->getVal1(), Slot);
+ }
+ return RValue::get(0);
+ }
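The init path above deliberately emits an ordinary, non-atomic store; a sketch of the corresponding C11 usage:

void start_counter(void) {
  static _Atomic int counter;
  __c11_atomic_init(&counter, 0); // plain store; only valid before the object is visible to other threads
}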
+
Order = EmitScalarExpr(E->getOrder());
- if (E->isCmpXChg()) {
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
Val1 = EmitScalarExpr(E->getVal1());
- Val2 = EmitValToTemp(*this, E->getVal2());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- (void)OrderFail; // OrderFail is unused at the moment
- } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
- MemTy->isPointerType()) {
- // For pointers, we're required to do a bit of math: adding 1 to an int*
- // is not the same as adding 1 to a uintptr_t.
- QualType Val1Ty = E->getVal1()->getType();
- llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
- CharUnits PointeeIncAmt =
- getContext().getTypeSizeInChars(MemTy->getPointeeType());
- Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
- } else if (E->getOp() != AtomicExpr::Load) {
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
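+      // E.g., a sketch assuming '_Atomic(int *) ap' and 'int *p':
+      //   __c11_atomic_fetch_add(&ap, 1, order);      // advances sizeof(int)
+      //   __atomic_fetch_add(&p, sizeof(int), order); // GNU form, pre-scaled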
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
}
- if (E->getOp() != AtomicExpr::Store && !Dest)
+ if (!E->getType()->isVoidType() && !Dest)
Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
- // FIXME: Finalize what the libcalls are actually supposed to look like.
- // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
- return EmitUnsupportedRValue(E, "atomic library call");
- }
-#if 0
- if (UseLibcall) {
- const char* LibCallName;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::CmpXchgStrong:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
- case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
- case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
- case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
- case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
- case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break;
- case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
- case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break;
- }
- llvm::SmallVector<QualType, 4> Params;
+
+ llvm::SmallVector<QualType, 5> Params;
CallArgList Args;
- QualType RetTy = getContext().VoidTy;
- if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+  // Size is always the first parameter.
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+  // Atomic address is always the second parameter.
Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
getContext().VoidPtrTy);
- if (E->getOp() != AtomicExpr::Load)
+
+ const char* LibCallName;
+ QualType RetTy = getContext().VoidTy;
+ switch (E->getOp()) {
+  // There is only one libcall for compare and exchange, because there is no
+ // optimisation benefit possible from a libcall version of a weak compare
+ // and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
- if (E->isCmpXChg()) {
Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
getContext().VoidPtrTy);
- RetTy = getContext().IntTy;
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+#if 0
+ // These are only defined for 1-16 byte integers. It is not clear what
+ // their semantics would be on anything else...
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+#endif
+ default: return EmitUnsupportedRValue(E, "atomic library call");
}
- Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
- getContext().getSizeType());
+  // Order is always the last parameter.
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
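+  // Taken together: e.g. a __c11_atomic_load of a 12-byte struct at 'p'
+  // becomes roughly (a sketch of the emitted call, with 'dest' standing in
+  // for the result temporary):
+  //   __atomic_load(12, (void *)p, (void *)dest, order);
+  // with the size first, the object address second, and the order last.
+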
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+ CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
if (E->isCmpXChg())
return Res;
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), Dest);
}
-#endif
+
llvm::Type *IPtrTy =
llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
llvm::Value *OrigDest = Dest;
@@ -2684,24 +3077,31 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
default: // invalid order
// We should not ever get here normally, but it's hard to
// enforce that in general.
- break;
+ break;
}
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
// Long case, when Order isn't obviously constant.
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
MonotonicBB = createBasicBlock("monotonic", CurFn);
- if (E->getOp() != AtomicExpr::Store)
+ if (!IsStore)
AcquireBB = createBasicBlock("acquire", CurFn);
- if (E->getOp() != AtomicExpr::Load)
+ if (!IsLoad)
ReleaseBB = createBasicBlock("release", CurFn);
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+ if (!IsLoad && !IsStore)
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
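+  // In C terms, the dispatch built below is roughly (a sketch with a
+  // hypothetical op(), assuming the C11 memory-order numbering 0..5):
+  //   switch (order) {
+  //   default: /* 0: relaxed */            op(memory_order_relaxed); break;
+  //   case 1: case 2: /* consume/acquire */ op(memory_order_acquire); break;
+  //   case 3: /* release */                op(memory_order_release); break;
+  //   case 4: /* acq_rel */                op(memory_order_acq_rel); break;
+  //   case 5: /* seq_cst */                op(memory_order_seq_cst); break;
+  //   }
+  // with the acquire/release/acq_rel arms dropped for pure stores/loads.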
@@ -2718,7 +3118,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
Builder.CreateBr(ContBB);
- if (E->getOp() != AtomicExpr::Store) {
+ if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
@@ -2726,14 +3126,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
}
- if (E->getOp() != AtomicExpr::Load) {
+ if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
}
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+ if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
@@ -2748,7 +3148,102 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
+
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::Value *ULPs = llvm::ConstantFP::get(Builder.getFloatTy(), Accuracy);
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), ULPs);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
+ Node);
+}
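+
+// E.g. (a sketch, assuming a 2.5-ulp accuracy bound such as OpenCL permits
+// for single-precision divide): calling SetFPAccuracy(Div, 2.5f) on an fdiv
+// tags it with metadata along the lines of
+//   %div = fdiv float %a, %b, !fpaccuracy !0
+//   !0 = metadata !{float 2.500000e+00}
+// so later passes may relax just that one operation.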
+
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
+ !ov->getType()->isAnyComplexType()) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
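+
+// A sketch of what typically lands here: for an ObjC property compound
+// assignment like 'obj.count += 1', the semantic form is roughly (with
+// illustrative names)
+//   ov = obj;                       // OpaqueValueExpr, bound once
+//   [ov setCount:[ov count] + 1];   // the result expression
+// The loop above binds 'ov', emits the non-result semantics for their side
+// effects only, and evaluates just the result expression into the slot.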
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 97754d5c0ba6..b6efc1cafaaa 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
@@ -74,12 +75,17 @@ public:
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
- void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
+ unsigned Alignment = 0);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+ void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
+ void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E);
+
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
- if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
+ if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
return AggValueSlot::NeedsGCBarriers;
return AggValueSlot::DoesNotNeedGCBarriers;
}
@@ -103,7 +109,24 @@ public:
}
// l-values.
- void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // For aggregates, we should always be able to emit the variable
+ // as an l-value unless it's a reference. This is due to the fact
+ // that we can't actually ever see a normal l2r conversion on an
+ // aggregate in C++, and in C there's no language standard
+ // actively preventing us from listing variables in the captures
+ // list of a block.
+ if (E->getDecl()->getType()->isReferenceType()) {
+ if (CodeGenFunction::ConstantEmission result
+ = CGF.tryEmitAsConstant(E)) {
+ EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ return;
+ }
+ }
+
+ EmitAggLoadOfLValue(E);
+ }
+
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
@@ -111,9 +134,6 @@ public:
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
- void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- EmitAggLoadOfLValue(E);
- }
void VisitPredefinedExpr(const PredefinedExpr *E) {
EmitAggLoadOfLValue(E);
}
@@ -131,7 +151,6 @@ public:
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
EmitAggLoadOfLValue(E);
}
- void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
@@ -142,12 +161,22 @@ public:
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitLambdaExpr(LambdaExpr *E);
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+ void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ if (E->isGLValue()) {
+ LValue LV = CGF.EmitPseudoObjectLValue(E);
+ return EmitFinalDestCopy(E, LV);
+ }
+
+ CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ }
+
void VisitVAArgExpr(VAArgExpr *E);
void EmitInitializationToLValue(Expr *E, LValue Address);
@@ -213,7 +242,8 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
+ unsigned Alignment) {
assert(Src.isAggregate() && "value must be aggregate value!");
// If Dest is ignored, then we're evaluating an aggregate expression
@@ -224,7 +254,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// volatile.
if (Dest.isIgnored()) {
if (!Src.isVolatileQualified() ||
- CGF.CGM.getLangOptions().CPlusPlus ||
+ CGF.CGM.getLangOpts().CPlusPlus ||
(IgnoreResult && Ignore))
return;
@@ -248,16 +278,239 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy is volatile for both source and destination.
CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
- Dest.isVolatile()|Src.isVolatileQualified());
+ Dest.isVolatile()|Src.isVolatileQualified(),
+ Alignment);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
- EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
- Src.isVolatileQualified()),
- Ignore);
+ CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
+ EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+}
+
+static QualType GetStdInitializerListElementType(QualType T) {
+ // Just assume that this is really std::initializer_list.
+ ClassTemplateSpecializationDecl *specialization =
+ cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
+ return specialization->getTemplateArgs()[0].getAsType();
+}
+
+/// \brief Prepare cleanup for the temporary array.
+static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
+ QualType arrayType,
+ llvm::Value *addr,
+ const InitListExpr *initList) {
+ QualType::DestructionKind dtorKind = arrayType.isDestructedType();
+ if (!dtorKind)
+ return; // Type doesn't need destroying.
+ if (dtorKind != QualType::DK_cxx_destructor) {
+ CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
+ return;
+ }
+
+ CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
+ CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
+ /*EHCleanup=*/true);
+}
+
+/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// real initializer list.
+void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
+ InitListExpr *initList) {
+ // We emit an array containing the elements, then have the init list point
+ // at the array.
+ ASTContext &ctx = CGF.getContext();
+ unsigned numInits = initList->getNumInits();
+ QualType element = GetStdInitializerListElementType(initList->getType());
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+  QualType array =
+      ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(array);
+ llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
+ alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
+ alloc->setName(".initlist.");
+
+ EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
+
+ // FIXME: The diagnostics are somewhat out of place here.
+ RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ QualType elementPtr = ctx.getPointerType(element.withConst());
+
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue start = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
+ ++field;
+
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue endOrLength = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+    llvm::Value *arrayEnd =
+        Builder.CreateStructGEP(alloc, numInits, "arrayend");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
+  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
+ } else {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ if (!Dest.isExternallyDestructed())
+ EmitStdInitializerListCleanup(CGF, array, alloc, initList);
+}
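+
+// A sketch of the effect, assuming the usual two-field representation of
+// std::initializer_list (member names illustrative):
+//   std::initializer_list<int> il = {1, 2, 3};
+// becomes, conceptually,
+//   const int backing[3] = {1, 2, 3};
+//   il.__begin = backing;
+//   il.__size  = 3;          // or: il.__end = backing + 3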
+
+/// \brief Emit initialization of an array from an initializer list.
+void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E) {
+ uint64_t NumInitElements = E->getNumInits();
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ assert(NumInitElements <= NumArrayElements);
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // If these are nested std::initializer_list inits, do them directly,
+ // because they are conceptually the same "location".
+ InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
+ if (initList && initList->initializesStdInitializerList()) {
+ EmitStdInitializerList(element, initList);
+ } else {
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
+ }
+
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
+
+ // Any remaining elements need to be zero-initialized, possibly
+  // using the filler expression.  We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
+ else
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
+ }
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
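+
+// E.g. (a sketch): for 'S a[4] = { S(1), S(2) };' the code above emits the
+// two explicit initializers, then runs the filler loop over the remaining
+// two slots, keeping 'endOfInit' current so that a throwing constructor
+// destroys exactly the elements already built.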
//===----------------------------------------------------------------------===//
@@ -325,16 +578,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"should have been unpacked before we got here");
}
- case CK_GetObjCProperty: {
- LValue LV = CGF.EmitLValue(E->getSubExpr());
- assert(LV.isPropertyRef());
- RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
- EmitMoveFromReturnSlot(E, RV);
- break;
- }
-
case CK_LValueToRValue: // hope for downstream optimization
case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
@@ -345,7 +592,6 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_LValueBitCast:
llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
- break;
case CK_Dependent:
case CK_BitCast:
@@ -356,6 +602,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
@@ -385,6 +632,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -404,11 +652,6 @@ void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
EmitMoveFromReturnSlot(E, RV);
}
-void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- llvm_unreachable("direct property access not surrounded by "
- "lvalue-to-rvalue cast");
-}
-
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
CGF.EmitIgnoredExpr(E->getLHS());
Visit(E->getRHS());
@@ -456,29 +699,13 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LHS = CGF.EmitLValue(E->getLHS());
- // We have to special case property setters, otherwise we must have
- // a simple lvalue (no aggregates inside vectors, bitfields).
- if (LHS.isPropertyRef()) {
- const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
- QualType ArgType = RE->getSetterArgType();
- RValue Src;
- if (ArgType->isReferenceType())
- Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
- else {
- AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), Slot);
- Src = Slot.asRValue();
- }
- CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
- } else {
- // Codegen the RHS so that it stores directly into the LHS.
- AggValueSlot LHSSlot =
- AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
- needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased);
- CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
- EmitFinalDestCopy(E, LHS, true);
- }
+ // Codegen the RHS so that it stores directly into the LHS.
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
+ EmitFinalDestCopy(E, LHS, true);
}
void AggExprEmitter::
@@ -547,7 +774,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Push that destructor we promised.
if (!wasExternallyDestructed)
- CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}
void
@@ -556,8 +783,16 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
CGF.EmitCXXConstructExpr(E, Slot);
}
+void
+AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitLambdaExpr(E, Slot);
+}
+
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.EmitExprWithCleanups(E, Dest);
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ Visit(E->getSubExpr());
}
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
@@ -636,9 +871,16 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
return;
if (!CGF.hasAggregateLLVMType(type)) {
- // For non-aggregates, we can store zero
+ // For non-aggregates, we can store zero.
llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
- CGF.EmitStoreThroughLValue(RValue::get(null), lv);
+ // Note that the following is not equivalent to
+ // EmitStoreThroughBitfieldLValue for ARC types.
+ if (lv.isBitField()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
+ } else {
+ assert(lv.isSimple());
+ CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
+ }
} else {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
@@ -665,17 +907,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- llvm::Value *DestPtr = Dest.getAddr();
+ if (E->initializesStdInitializerList()) {
+ EmitStdInitializerList(Dest.getAddr(), E);
+ return;
+ }
+
+ llvm::Value *DestPtr = EnsureSlot(E->getType()).getAddr();
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- llvm::PointerType *APType =
- cast<llvm::PointerType>(DestPtr->getType());
- llvm::ArrayType *AType =
- cast<llvm::ArrayType>(APType->getElementType());
-
- uint64_t NumInitElements = E->getNumInits();
-
if (E->getNumInits() > 0) {
QualType T1 = E->getType();
QualType T2 = E->getInit(0)->getType();
@@ -685,136 +925,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
}
- uint64_t NumArrayElements = AType->getNumElements();
- assert(NumInitElements <= NumArrayElements);
-
- QualType elementType = E->getType().getCanonicalType();
- elementType = CGF.getContext().getQualifiedType(
- cast<ArrayType>(elementType)->getElementType(),
- elementType.getQualifiers() + Dest.getQualifiers());
-
- // DestPtr is an array*. Construct an elementType* by drilling
- // down a level.
- llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
- llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
-
- // Exception safety requires us to destroy all the
- // already-constructed members if an initializer throws.
- // For that, we'll need an EH cleanup.
- QualType::DestructionKind dtorKind = elementType.isDestructedType();
- llvm::AllocaInst *endOfInit = 0;
- EHScopeStack::stable_iterator cleanup;
- if (CGF.needsEHCleanup(dtorKind)) {
- // In principle we could tell the cleanup where we are more
- // directly, but the control flow can get so varied here that it
- // would actually be quite complex. Therefore we go through an
- // alloca.
- endOfInit = CGF.CreateTempAlloca(begin->getType(),
- "arrayinit.endOfInit");
- Builder.CreateStore(begin, endOfInit);
- CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
- CGF.getDestroyer(dtorKind));
- cleanup = CGF.EHStack.stable_begin();
-
- // Otherwise, remember that we didn't need a cleanup.
- } else {
- dtorKind = QualType::DK_none;
- }
-
- llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
-
- // The 'current element to initialize'. The invariants on this
- // variable are complicated. Essentially, after each iteration of
- // the loop, it points to the last initialized element, except
- // that it points to the beginning of the array before any
- // elements have been initialized.
- llvm::Value *element = begin;
-
- // Emit the explicit initializers.
- for (uint64_t i = 0; i != NumInitElements; ++i) {
- // Advance to the next element.
- if (i > 0) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
-
- // Tell the cleanup that it needs to destroy up to this
- // element. TODO: some of these stores can be trivially
- // observed to be unnecessary.
- if (endOfInit) Builder.CreateStore(element, endOfInit);
- }
-
- LValue elementLV = CGF.MakeAddrLValue(element, elementType);
- EmitInitializationToLValue(E->getInit(i), elementLV);
- }
-
- // Check whether there's a non-trivial array-fill expression.
- // Note that this will be a CXXConstructExpr even if the element
- // type is an array (or array of array, etc.) of class type.
- Expr *filler = E->getArrayFiller();
- bool hasTrivialFiller = true;
- if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
- assert(cons->getConstructor()->isDefaultConstructor());
- hasTrivialFiller = cons->getConstructor()->isTrivial();
- }
-
- // Any remaining elements need to be zero-initialized, possibly
- // using the filler expression. We can skip this if the we're
- // emitting to zeroed memory.
- if (NumInitElements != NumArrayElements &&
- !(Dest.isZeroed() && hasTrivialFiller &&
- CGF.getTypes().isZeroInitializable(elementType))) {
-
- // Use an actual loop. This is basically
- // do { *array++ = filler; } while (array != end);
-
- // Advance to the start of the rest of the array.
- if (NumInitElements) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
- if (endOfInit) Builder.CreateStore(element, endOfInit);
- }
-
- // Compute the end of the array.
- llvm::Value *end = Builder.CreateInBoundsGEP(begin,
- llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
- "arrayinit.end");
-
- llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
- llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
-
- // Jump into the body.
- CGF.EmitBlock(bodyBB);
- llvm::PHINode *currentElement =
- Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
- currentElement->addIncoming(element, entryBB);
-
- // Emit the actual filler expression.
- LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
- if (filler)
- EmitInitializationToLValue(filler, elementLV);
- else
- EmitNullInitializationToLValue(elementLV);
-
- // Move on to the next element.
- llvm::Value *nextElement =
- Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
-
- // Tell the EH cleanup that we finished with the last element.
- if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
-
- // Leave the loop if we're done.
- llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
- "arrayinit.done");
- llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
- Builder.CreateCondBr(done, endBB, bodyBB);
- currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
-
- CGF.EmitBlock(endBB);
- }
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
- // Leave the partial-array cleanup if we entered one.
- if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);
+ llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+ EmitArrayInit(DestPtr, AType, elementType, E);
return;
}
@@ -862,6 +981,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We'll need to enter cleanup scopes in case any of the member
// initializers throw an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ llvm::Instruction *cleanupDominator = 0;
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
@@ -905,6 +1025,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator)
+ cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
+
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
cleanups.push_back(CGF.EHStack.stable_begin());
@@ -924,7 +1047,11 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Deactivate all the partial cleanups in reverse order, which
// generally means popping them.
for (unsigned i = cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(cleanups[i-1]);
+ CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (cleanupDominator)
+ cleanupDominator->eraseFromParent();
}
//===----------------------------------------------------------------------===//
@@ -996,7 +1123,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
// C++ objects with a user-declared constructor don't need zero'ing.
- if (CGF.getContext().getLangOptions().CPlusPlus)
+ if (CGF.getContext().getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -1021,9 +1148,8 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- Loc = CGF.Builder.CreateBitCast(Loc, BP);
+ Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
Align.getQuantity(), false);
@@ -1066,10 +1192,10 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
- bool isVolatile) {
+ bool isVolatile, unsigned Alignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- if (getContext().getLangOptions().CPlusPlus) {
+ if (getContext().getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
@@ -1099,6 +1225,9 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
std::pair<CharUnits, CharUnits> TypeInfo =
getContext().getTypeInfoInChars(Ty);
+ if (!Alignment)
+ Alignment = TypeInfo.second.getQuantity();
+
// FIXME: Handle variable sized types.
// FIXME: If we have a volatile struct, the optimizer can remove what might
@@ -1125,7 +1254,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
// Don't do any of the memmove_collectable tests if GC isn't set.
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
@@ -1155,5 +1284,60 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- TypeInfo.second.getQuantity(), isVolatile);
+ Alignment, isVolatile);
+}
+
+void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
+ const Expr *init) {
+ const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
+ if (cleanups)
+ init = cleanups->getSubExpr();
+
+ if (isa<InitListExpr>(init) &&
+ cast<InitListExpr>(init)->initializesStdInitializerList()) {
+ // We initialized this std::initializer_list with an initializer list.
+ // A backing array was created. Push a cleanup for it.
+ EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
+ }
+}
+
+static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
+ llvm::Value *arrayStart,
+ const InitListExpr *init) {
+ // Check if there are any recursive cleanups to do, i.e. if we have
+ // std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
+ // then we need to destroy the inner array as well.
+ for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
+ const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
+ if (!subInit || !subInit->initializesStdInitializerList())
+ continue;
+
+ // This one needs to be destroyed. Get the address of the std::init_list.
+ llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
+ llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
+ "std.initlist");
+ CGF.EmitStdInitializerListCleanup(loc, subInit);
+ }
+}
+
+void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init) {
+ ASTContext &ctx = getContext();
+ QualType element = GetStdInitializerListElementType(init->getType());
+ unsigned numInits = init->getNumInits();
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ QualType arrayPtr = ctx.getPointerType(array);
+ llvm::Type *arrayPtrType = ConvertType(arrayPtr);
+
+ // lvalue is the location of a std::initializer_list, which as its first
+ // element has a pointer to the array we want to destroy.
+ llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
+ llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
+
+ ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
+
+ llvm::Value *arrayAddress =
+ Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
+ ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 78db5903de54..d3ba77074700 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -33,8 +33,6 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
CallArgList Args;
// Push the this ptr.
@@ -45,13 +43,16 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
Args.add(RValue::get(VTT), T);
}
+
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
- // And the rest of the call args
+ // And the rest of the call args.
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
- QualType ResultType = FPT->getResultType();
- return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
- FPT->getExtInfo()),
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
+ FPT->getExtInfo(),
+ required),
Callee, ReturnValue, Args, MD);
}
@@ -112,7 +113,7 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
// When building with -fapple-kext, all calls must go through the vtable since
// the kernel linker can do runtime patching of vtables.
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return false;
// If the most derived class is marked final, we know that no subclass can
@@ -229,17 +230,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// Compute the function type we're calling.
const CGFunctionInfo *FInfo = 0;
if (isa<CXXDestructorDecl>(MD))
- FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
else if (isa<CXXConstructorDecl>(MD))
- FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
- Ctor_Complete);
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
+ cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
else
- FInfo = &CGM.getTypes().getFunctionInfo(MD);
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -256,7 +256,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- if (getContext().getLangOptions().AppleKext &&
+ if (getContext().getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -269,7 +269,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = BuildVirtualCall(MD, This, Ty);
} else {
- if (getContext().getLangOptions().AppleKext &&
+ if (getContext().getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -322,7 +322,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
ReturnValue, Args);
}
@@ -427,7 +427,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Elide the constructor if we're constructing from a temporary.
// The temporary check is required because Sema sets this on NRVO
// returns.
- if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
assert(getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(0)->getType()));
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
@@ -508,6 +508,7 @@ static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
const CXXNewExpr *e,
+ unsigned minElements,
llvm::Value *&numElements,
llvm::Value *&sizeWithoutCookie) {
QualType type = e->getAllocatedType();
@@ -581,6 +582,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// Okay, compute a count at the right width.
llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers. If we do, that's treated like an overflow.
+ if (adjustedCount.ult(minElements))
+ hasAnyOverflow = true;
+
// Scale numElements by that. This might overflow, but we don't
// care because it only overflows if allocationSize does, too, and
// if that overflows then we shouldn't use this.
@@ -612,14 +618,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// Otherwise, we might need to use the overflow intrinsics.
} else {
- // There are up to four conditions we need to test for:
+ // There are up to five conditions we need to test for:
// 1) if isSigned, we need to check whether numElements is negative;
// 2) if numElementsWidth > sizeWidth, we need to check whether
// numElements is larger than something representable in size_t;
- // 3) we need to compute
+ // 3) if minElements > 0, we need to check whether numElements is smaller
+ // than that.
+ // 4) we need to compute
// sizeWithoutCookie := numElements * typeSizeMultiplier
// and check whether it overflows; and
- // 4) if we need a cookie, we need to compute
+ // 5) if we need a cookie, we need to compute
// size := sizeWithoutCookie + cookieSize
// and check whether it overflows.
@@ -646,10 +654,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// If there's a non-1 type size multiplier, then we can do the
// signedness check at the same time as we do the multiply
// because a negative number times anything will cause an
- // unsigned overflow. Otherwise, we have to do it here.
+ // unsigned overflow. Otherwise, we have to do it here. But at least
+ // in this case, we can subsume the >= minElements check.
if (typeSizeMultiplier == 1)
hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
- llvm::ConstantInt::get(CGF.SizeTy, 0));
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
// Otherwise, zext up to size_t if necessary.
} else if (numElementsWidth < sizeWidth) {
@@ -658,6 +667,21 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
assert(numElements->getType() == CGF.SizeTy);
+ if (minElements) {
+ // Don't allow allocation of fewer elements than we have initializers.
+ if (!hasOverflow) {
+ hasOverflow = CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+ } else if (numElementsWidth > sizeWidth) {
+ // The other existing overflow subsumes this check.
+ // We do an unsigned comparison, since any signed value < -1 is
+ // taken care of either above or below.
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow,
+ CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+ }
+ }
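+    // E.g. (a sketch of C++11 list-init): for 'new int[n]{1, 2, 3}',
+    // minElements is 3, so a runtime 'n < 3' check is folded into
+    // hasOverflow; an overflowing size is forced to -1 further down,
+    // making the allocation fail rather than under-allocate.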
+
size = numElements;
// Multiply by the type size if necessary. This multiplier
@@ -741,30 +765,26 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
return size;
}
-static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
- llvm::Value *NewPtr) {
-
- assert(E->getNumConstructorArgs() == 1 &&
- "Can only have one argument to initializer of POD type.");
-
- const Expr *Init = E->getConstructorArg(0);
- QualType AllocType = E->getAllocatedType();
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
+ QualType AllocType, llvm::Value *NewPtr) {
- unsigned Alignment =
- CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
+ Alignment),
false);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
+ = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(Init, Slot);
+
+ CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
}
}
@@ -773,30 +793,67 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
QualType elementType,
llvm::Value *beginPtr,
llvm::Value *numElements) {
- // We have a POD type.
- if (E->getNumConstructorArgs() == 0)
- return;
-
- // Check if the number of elements is constant.
- bool checkZero = true;
- if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
- // If it's constant zero, skip the whole loop.
- if (constNum->isZero()) return;
-
- checkZero = false;
- }
+ if (!E->hasInitializer())
+ return; // We have a POD type.
+ llvm::Value *explicitPtr = beginPtr;
// Find the end of the array, hoisted out of the loop.
llvm::Value *endPtr =
Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
+ unsigned initializerElements = 0;
+
+ const Expr *Init = E->getInitializer();
+ llvm::AllocaInst *endOfInit = 0;
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ // If the initializer is an initializer list, first do the explicit elements.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ initializerElements = ILE->getNumInits();
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
+ cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
+ pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ }
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
+      explicitPtr =
+          Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
+ }
+
+ // The remaining elements are filled with the array filler expression.
+ Init = ILE->getArrayFiller();
+ }
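+
+  // A sketch of the split this sets up (C++11 user code):
+  //   int *p = new int[5]{1, 2, 3};
+  //   // elements 0..2: stored through explicitPtr above
+  //   // elements 3..4: Init is now the array filler (an implicit zero
+  //   //                here), applied by the loop that follows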
+
// Create the continuation block.
llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
- // If we need to check for zero, do so now.
- if (checkZero) {
+ // If the number of elements isn't constant, we have to now check if there is
+ // anything left to initialize.
+ if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
+ // If all elements have already been initialized, skip the whole loop.
+ if (constNum->getZExtValue() <= initializerElements) {
+ // If there was a cleanup, deactivate it.
+ if (cleanupDominator)
+        DeactivateCleanupBlock(cleanup, cleanupDominator);
+ return;
+ }
+ } else {
llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
- llvm::Value *isEmpty = Builder.CreateICmpEQ(beginPtr, endPtr,
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
"array.isempty");
Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
EmitBlock(nonEmptyBB);
@@ -810,24 +867,28 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
// Set up the current-element phi.
llvm::PHINode *curPtr =
- Builder.CreatePHI(beginPtr->getType(), 2, "array.cur");
- curPtr->addIncoming(beginPtr, entryBB);
+ Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
+ curPtr->addIncoming(explicitPtr, entryBB);
+
+ // Store the new cleanup position for irregular cleanups.
+ if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
// Enter a partial-destruction cleanup if necessary.
- QualType::DestructionKind dtorKind = elementType.isDestructedType();
- EHScopeStack::stable_iterator cleanup;
- if (needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator && needsEHCleanup(dtorKind)) {
pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
getDestroyer(dtorKind));
cleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
}
// Emit the initializer into this element.
- StoreAnyExprIntoOneUnit(*this, E, curPtr);
+ StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
// Leave the cleanup if we entered one.
- if (cleanup != EHStack.stable_end())
- DeactivateCleanupBlock(cleanup);
+ if (cleanupDominator) {
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
// Advance to the next element.
llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
@@ -854,13 +915,15 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
+ const Expr *Init = E->getInitializer();
if (E->isArray()) {
- if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
+ CXXConstructorDecl *Ctor = CCE->getConstructor();
bool RequiresZeroInitialization = false;
- if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
+ if (Ctor->isTrivial()) {
// If new expression did not specify value-initialization, then there
// is no initialization.
- if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
+ if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
return;
if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
@@ -873,43 +936,25 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
RequiresZeroInitialization = true;
}
- CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
- E->constructor_arg_begin(),
- E->constructor_arg_end(),
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ CCE->arg_begin(), CCE->arg_end(),
RequiresZeroInitialization);
return;
- } else if (E->getNumConstructorArgs() == 1 &&
- isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
+ } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
+ CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
// Optimization: since zero initialization will just set the memory
// to all zeroes, generate a single memset to do it in one shot.
EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
return;
- } else {
- CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
- return;
}
- }
-
- if (CXXConstructorDecl *Ctor = E->getConstructor()) {
- // Per C++ [expr.new]p15, if we have an initializer, then we're performing
- // direct initialization. C++ [dcl.init]p5 requires that we
- // zero-initialize storage if there are no user-declared constructors.
- if (E->hasInitializer() &&
- !Ctor->getParent()->hasUserDeclaredConstructor() &&
- !Ctor->getParent()->isEmpty())
- CGF.EmitNullInitialization(NewPtr, ElementType);
-
- CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- NewPtr, E->constructor_arg_begin(),
- E->constructor_arg_end());
-
+ CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
return;
}
- // We have a POD type.
- if (E->getNumConstructorArgs() == 0)
+
+ if (!Init)
return;
-
- StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
+
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}
namespace {
@@ -961,7 +1006,7 @@ namespace {
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1022,7 +1067,7 @@ namespace {
}
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1057,7 +1102,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
SavedNewPtr,
@@ -1066,7 +1111,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
Cleanup->setPlacementArg(I,
DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
- CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
+ CGF.initFullExprCleanup();
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
@@ -1083,10 +1128,18 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
+  // If there is a brace-initializer, we cannot allocate fewer elements than
+  // there are initializers.
+ unsigned minElements = 0;
+ if (E->isArray() && E->hasInitializer()) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ minElements = ILE->getNumInits();
+ }
+
llvm::Value *numElements = 0;
llvm::Value *allocSizeWithoutCookie = 0;
llvm::Value *allocSize =
- EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);
+ EmitCXXNewAllocSize(*this, E, minElements, numElements,
+ allocSizeWithoutCookie);
allocatorArgs.add(RValue::get(allocSize), sizeType);
@@ -1129,7 +1182,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
- RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
+ RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
+ allocatorType),
CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
allocatorArgs, allocator);
}
@@ -1140,7 +1194,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// CXXNewExpr::shouldNullCheckAllocation()) and we have an
// interesting initializer.
bool nullCheck = allocatorType->isNothrow(getContext()) &&
- !(allocType.isPODType(getContext()) && !E->hasInitializer());
+ (!allocType.isPODType(getContext()) || E->hasInitializer());
llvm::BasicBlock *nullCheckBB = 0;
llvm::BasicBlock *contBB = 0;
@@ -1168,10 +1222,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
EHScopeStack::stable_iterator operatorDeleteCleanup;
+ llvm::Instruction *cleanupDominator = 0;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
}
assert((allocSize == allocSizeWithoutCookie) ==
@@ -1200,9 +1256,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// Deactivate the 'operator delete' cleanup if we finished
// initialization.
- if (operatorDeleteCleanup.isValid())
- DeactivateCleanupBlock(operatorDeleteCleanup);
-
+ if (operatorDeleteCleanup.isValid()) {
+ DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
if (nullCheck) {
conditional.end(*this);
@@ -1248,7 +1306,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
- EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
+ EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
DeleteArgs, DeleteFD);
}
@@ -1295,9 +1353,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
llvm::Type *Ty =
- CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
- Dtor_Complete),
- /*isVariadic=*/false);
+ CGF.getTypes().GetFunctionType(
+ CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
llvm::Value *Callee
= CGF.BuildVirtualCall(Dtor,
@@ -1324,7 +1381,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Ptr);
- else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ else if (CGF.getLangOpts().ObjCAutoRefCount &&
ElementType->isObjCLifetimeType()) {
switch (ElementType.getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -1405,7 +1462,7 @@ namespace {
}
// Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), Args, OperatorDelete);
}
@@ -1514,10 +1571,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
-
- llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}
@@ -1598,7 +1652,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
// const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -1612,11 +1666,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
-
- llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, false);
-
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}
@@ -1762,3 +1812,19 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
return Value;
}
+
+void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
+ RunCleanupsScope Scope(*this);
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+    // Emit the initialization for this capture.
+ LValue LV = EmitLValueForFieldInitialization(Slot.getAddr(), *CurField, 0);
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
+}
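
The EmitNewArrayInitializer rework above lowers C++11 braced initializers on
array-new expressions: the explicit elements are stored first, then the loop
fills the remainder with the array filler, with a partial-destruction cleanup
active in case a constructor throws. A minimal standalone sketch of the
source-level construct being lowered (plain C++11, not part of this patch;
the Tracked type is illustrative):

#include <cstdio>

struct Tracked {
  int v;
  Tracked(int v = -1) : v(v) { std::printf("construct %d\n", v); }
  ~Tracked() { std::printf("destroy %d\n", v); }
};

int main() {
  // Runtime count; it must be at least the number of explicit initializers,
  // which is what the new minElements plumbing lets the allocation check.
  unsigned n = 5;
  // Elements 0-2 take the explicit initializers; the loop emitted above
  // value-initializes elements 3-4 via the array filler. If a constructor
  // threw partway through, the partial-destruction cleanup would destroy
  // the elements already built.
  Tracked *p = new Tracked[n]{1, 2, 3};
  delete[] p;
  return 0;
}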
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 4a31bcfbe9e0..0233745afa91 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -64,11 +64,8 @@ public:
}
ComplexPairTy EmitLoadOfLValue(LValue LV) {
- if (LV.isSimple())
- return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
@@ -78,11 +75,8 @@ public:
/// EmitStoreThroughLValue - Given an l-value of complex type, store
/// a complex number into it.
void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
- if (LV.isSimple())
- return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- CGF.EmitStoreThroughPropertyRefLValue(RValue::getComplex(Val), LV);
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
@@ -117,13 +111,18 @@ public:
}
// l-values.
- ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitBlockDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+
+ llvm::ConstantStruct *pair =
+ cast<llvm::ConstantStruct>(result.getValue());
+ return ComplexPairTy(pair->getOperand(0), pair->getOperand(1));
+ }
return EmitLoadOfLValue(E);
}
- ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- assert(E->getObjectKind() == OK_Ordinary);
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -137,6 +136,10 @@ public:
return CGF.getOpaqueRValueMapping(E).getComplexVal();
}
+ ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getComplexVal();
+ }
+
// FIXME: CompoundLiteralExpr
ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
@@ -185,7 +188,9 @@ public:
return Visit(DAE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
- return CGF.EmitExprWithCleanups(E).getComplexVal();
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
}
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
@@ -265,6 +270,10 @@ public:
ComplexPairTy VisitInitListExpr(InitListExpr *E);
+ ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
@@ -362,12 +371,10 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
- case CK_GetObjCProperty: {
- LValue LV = CGF.EmitLValue(Op);
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
- }
-
+ // Atomic to non-atomic casts may be more than a no-op for some platforms and
+ // for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_LValueToRValue:
case CK_UserDefinedConversion:
@@ -394,6 +401,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
@@ -418,6 +426,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -631,11 +640,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return Val;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LV.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -670,11 +675,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LV = EmitBinAssignLValue(E, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return Val;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LV.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -831,7 +832,6 @@ EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
default:
llvm_unreachable("unexpected complex compound assignment");
- Op = 0;
}
ComplexPairTy Val; // ignored
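
The VisitExprWithCleanups change above enters the full-expression and runs a
cleanups scope around the visit; the observable semantics are that temporaries
created within a full-expression are destroyed once the full-expression ends,
after its value has been produced. A standalone sketch of that ordering (plain
C++, scalar-typed for brevity, not Clang code):

#include <cstdio>

struct Noisy {
  ~Noisy() { std::puts("temporary destroyed"); }
  int value() const { return 42; }
};

int main() {
  // The Noisy temporary is destroyed at the ';', after value() has been
  // read; RunCleanupsScope enforces the same ordering for complex-typed
  // full-expressions in the code above.
  int x = Noisy().value();
  std::printf("x = %d\n", x); // printed after "temporary destroyed"
  return 0;
}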
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 3997866ea68e..d528e0c4b72a 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -40,20 +40,27 @@ class ConstStructBuilder {
bool Packed;
CharUnits NextFieldOffsetInChars;
CharUnits LLVMStructAlignment;
- std::vector<llvm::Constant *> Elements;
+ SmallVector<llvm::Constant *, 32> Elements;
public:
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
InitListExpr *ILE);
-
-private:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ const APValue &Value, QualType ValTy);
+
+private:
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
: CGM(CGM), CGF(CGF), Packed(false),
NextFieldOffsetInChars(CharUnits::Zero()),
LLVMStructAlignment(CharUnits::One()) { }
- bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
+ void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
+
void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::ConstantInt *InitExpr);
@@ -62,8 +69,12 @@ private:
void AppendTailPadding(CharUnits RecordSize);
void ConvertStructToPacked();
-
+
bool Build(InitListExpr *ILE);
+ void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
+ CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
CharUnits getAlignment(const llvm::Constant *C) const {
if (Packed) return CharUnits::One();
@@ -77,14 +88,36 @@ private:
}
};
-bool ConstStructBuilder::
+void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ // Find the appropriate vtable within the vtable group.
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ llvm::Value *Indices[] = {
+ llvm::ConstantInt::get(CGM.Int64Ty, 0),
+ llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
+ };
+ llvm::Constant *VTableAddressPoint =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
+
+ // Add the vtable at the start of the object.
+ AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
+}
+
+void ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitCst) {
-
const ASTContext &Context = CGM.getContext();
CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+ AppendBytes(FieldOffsetInChars, InitCst);
+}
+
+void ConstStructBuilder::
+AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
+
assert(NextFieldOffsetInChars <= FieldOffsetInChars
&& "Field offset mismatch!");
@@ -99,14 +132,13 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
// Convert the struct to a packed struct.
ConvertStructToPacked();
-
+
AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
// We need to append padding.
- AppendPadding(
- FieldOffsetInChars - NextFieldOffsetInChars);
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
assert(NextFieldOffsetInChars == FieldOffsetInChars &&
"Did not add enough padding!");
@@ -118,14 +150,12 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
Elements.push_back(InitCst);
NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
getSizeInChars(InitCst);
-
+
if (Packed)
- assert(LLVMStructAlignment == CharUnits::One() &&
+ assert(LLVMStructAlignment == CharUnits::One() &&
"Packed struct not byte-aligned!");
else
LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
-
- return true;
}
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
@@ -280,7 +310,7 @@ void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
if (PadSize.isZero())
return;
- llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = CGM.Int8Ty;
if (PadSize > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
@@ -300,7 +330,7 @@ void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
}
void ConstStructBuilder::ConvertStructToPacked() {
- std::vector<llvm::Constant *> PackedElements;
+ SmallVector<llvm::Constant *, 16> PackedElements;
CharUnits ElementOffsetInChars = CharUnits::Zero();
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
@@ -316,7 +346,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
CharUnits NumChars =
AlignedElementOffsetInChars - ElementOffsetInChars;
- llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = CGM.Int8Ty;
if (NumChars > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
@@ -332,12 +362,17 @@ void ConstStructBuilder::ConvertStructToPacked() {
assert(ElementOffsetInChars == NextFieldOffsetInChars &&
"Packing the struct changed its size!");
- Elements = PackedElements;
+ Elements.swap(PackedElements);
LLVMStructAlignment = CharUnits::One();
Packed = true;
}
bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ if (ILE->initializesStdInitializerList()) {
+ //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
+ return false;
+ }
+
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
@@ -382,8 +417,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (!Field->isBitField()) {
// Handle non-bitfield members.
- if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
- return false;
+ AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
} else {
// Otherwise we have a bitfield.
AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
@@ -391,6 +425,106 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
}
+ return true;
+}
+
+namespace {
+struct BaseInfo {
+ BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+ : Decl(Decl), Offset(Offset), Index(Index) {
+ }
+
+ const CXXRecordDecl *Decl;
+ CharUnits Offset;
+ unsigned Index;
+
+ bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+}
+
+void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+ bool IsPrimaryBase, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ CharUnits Offset) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Add a vtable pointer, if we need one and it hasn't already been added.
+ if (CD->isDynamicClass() && !IsPrimaryBase)
+ AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
+
+ // Accumulate and sort bases, in order to visit them in address order, which
+ // may not be the same as declaration order.
+ llvm::SmallVector<BaseInfo, 8> Bases;
+ Bases.reserve(CD->getNumBases());
+ unsigned BaseNo = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+ BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
+ assert(!Base->isVirtual() && "should not have virtual bases here");
+ const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+ Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+ }
+ std::stable_sort(Bases.begin(), Bases.end());
+
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+ BaseInfo &Base = Bases[I];
+
+ bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+ Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTable, VTableClass, Offset + Base.Offset);
+ }
+ }
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && Val.getUnionField() != *Field)
+ continue;
+
+    // Don't emit anonymous bitfields; they only affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Emit the value of the initializer.
+ const APValue &FieldValue =
+ RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+ llvm::Constant *EltInit =
+ CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
+ assert(EltInit && "EmitConstantValue can't fail");
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+}
+
+llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
+ RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
CharUnits LayoutSizeInChars = Layout.getSize();
if (NextFieldOffsetInChars > LayoutSizeInChars) {
@@ -398,66 +532,85 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
// we must have a flexible array member at the end.
assert(RD->hasFlexibleArrayMember() &&
"Must have flexible array member if struct is bigger than type!");
-
+
// No tail padding is necessary.
- return true;
- }
+ } else {
+ // Append tail padding if necessary.
+ AppendTailPadding(LayoutSizeInChars);
- CharUnits LLVMSizeInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
- // Check if we need to convert the struct to a packed struct.
- if (NextFieldOffsetInChars <= LayoutSizeInChars &&
- LLVMSizeInChars > LayoutSizeInChars) {
- assert(!Packed && "Size mismatch!");
-
- ConvertStructToPacked();
- assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
- "Converting to packed did not help!");
- }
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
+ assert(!Packed && "Size mismatch!");
- // Append tail padding if necessary.
- AppendTailPadding(LayoutSizeInChars);
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
+ "Converting to packed did not help!");
+ }
- assert(LayoutSizeInChars == NextFieldOffsetInChars &&
- "Tail padding mismatch!");
+ assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ "Tail padding mismatch!");
+ }
- return true;
-}
-
-llvm::Constant *ConstStructBuilder::
- BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
- ConstStructBuilder Builder(CGM, CGF);
-
- if (!Builder.Build(ILE))
- return 0;
-
// Pick the type to use. If the type is layout identical to the ConvertType
// type then use it, otherwise use whatever the builder produced for us.
llvm::StructType *STy =
llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
- Builder.Elements,Builder.Packed);
- llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
- if (llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
- if (ILESTy->isLayoutIdentical(STy))
- STy = ILESTy;
+ Elements, Packed);
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
+ if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
+ if (ValSTy->isLayoutIdentical(STy))
+ STy = ValSTy;
}
-
- llvm::Constant *Result =
- llvm::ConstantStruct::get(STy, Builder.Elements);
-
- assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
- Builder.getAlignment(Result)) ==
- Builder.getSizeInChars(Result) && "Size mismatch!");
-
+
+ llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
+
+ assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
+ getSizeInChars(Result) && "Size mismatch!");
+
return Result;
}
-
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ return Builder.Finalize(ILE->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const APValue &Val,
+ QualType ValTy) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ llvm::Constant *VTable = 0;
+ if (CD && CD->isDynamicClass())
+ VTable = CGM.getVTables().GetAddrOfVTable(CD);
+
+ Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
+
+ return Builder.Finalize(ValTy);
+}
+
+
//===----------------------------------------------------------------------===//
// ConstExprEmitter
//===----------------------------------------------------------------------===//
-
+
+/// This class only needs to handle two cases:
+/// 1) Literals (this is used by APValue emission to emit literals).
+/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
+/// constant fold these types).
class ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
@@ -493,34 +646,6 @@ public:
return Visit(E->getInitializer());
}
- llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
- if (E->getType()->isMemberPointerType())
- return CGM.getMemberPointerConstant(E);
-
- return 0;
- }
-
- llvm::Constant *VisitBinSub(BinaryOperator *E) {
- // This must be a pointer/pointer subtraction. This only happens for
- // address of label.
- if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
- !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
- return 0;
-
- llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
- E->getLHS()->getType(), CGF);
- llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
- E->getRHS()->getType(), CGF);
-
- llvm::Type *ResultType = ConvertType(E->getType());
- LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
- RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
-
- // No need to divide by element size, since addr of label is always void*,
- // which has size 1 in GNUish.
- return llvm::ConstantExpr::getSub(LHS, RHS);
- }
-
llvm::Constant *VisitCastExpr(CastExpr* E) {
Expr *subExpr = E->getSubExpr();
llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
@@ -536,8 +661,8 @@ public:
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
- std::vector<llvm::Constant*> Elts;
- std::vector<llvm::Type*> Types;
+ SmallVector<llvm::Constant*, 2> Elts;
+ SmallVector<llvm::Type*, 2> Types;
Elts.push_back(C);
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
@@ -545,7 +670,7 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Ty = CGM.Int8Ty;
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
@@ -557,46 +682,41 @@ public:
llvm::StructType::get(C->getType()->getContext(), Types, false);
return llvm::ConstantStruct::get(STy, Elts);
}
- case CK_NullToMemberPointer: {
- const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
- return CGM.getCXXABI().EmitNullMemberPointer(MPT);
- }
-
- case CK_DerivedToBaseMemberPointer:
- case CK_BaseToDerivedMemberPointer:
- return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return C;
- case CK_CPointerToObjCPointerCast:
- case CK_BlockPointerToObjCPointerCast:
- case CK_AnyPointerToBlockPointerCast:
- case CK_LValueBitCast:
- case CK_BitCast:
- if (C->getType() == destType) return C;
- return llvm::ConstantExpr::getBitCast(C, destType);
-
case CK_Dependent: llvm_unreachable("saw dependent cast!");
+ case CK_ReinterpretMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
+
// These will never be supported.
case CK_ObjCObjectLValueCast:
- case CK_GetObjCProperty:
- case CK_ToVoid:
- case CK_Dynamic:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
return 0;
- // These might need to be supported for constexpr.
+ // These don't need to be handled here because Evaluate knows how to
+ // evaluate them in the cases where they can be folded.
+ case CK_BitCast:
+ case CK_ToVoid:
+ case CK_Dynamic:
+ case CK_LValueBitCast:
+ case CK_NullToMemberPointer:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
- return 0;
-
- // These should eventually be supported.
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_BaseToDerived:
@@ -614,53 +734,17 @@ public:
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
- return 0;
-
case CK_PointerToIntegral:
- if (!E->getType()->isBooleanType())
- return llvm::ConstantExpr::getPtrToInt(C, destType);
- // fallthrough
-
case CK_PointerToBoolean:
- return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
- llvm::ConstantPointerNull::get(cast<llvm::PointerType>(C->getType())));
-
case CK_NullToPointer:
- return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destType));
-
- case CK_IntegralCast: {
- bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
- return llvm::ConstantExpr::getIntegerCast(C, destType, isSigned);
- }
-
- case CK_IntegralToPointer: {
- bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
- C = llvm::ConstantExpr::getIntegerCast(C, CGM.IntPtrTy, isSigned);
- return llvm::ConstantExpr::getIntToPtr(C, destType);
- }
-
+ case CK_IntegralCast:
+ case CK_IntegralToPointer:
case CK_IntegralToBoolean:
- return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
- llvm::Constant::getNullValue(C->getType()));
-
case CK_IntegralToFloating:
- if (subExpr->getType()->isSignedIntegerOrEnumerationType())
- return llvm::ConstantExpr::getSIToFP(C, destType);
- else
- return llvm::ConstantExpr::getUIToFP(C, destType);
-
case CK_FloatingToIntegral:
- if (E->getType()->isSignedIntegerOrEnumerationType())
- return llvm::ConstantExpr::getFPToSI(C, destType);
- else
- return llvm::ConstantExpr::getFPToUI(C, destType);
-
case CK_FloatingToBoolean:
- return llvm::ConstantExpr::getFCmp(llvm::CmpInst::FCMP_UNE, C,
- llvm::Constant::getNullValue(C->getType()));
-
case CK_FloatingCast:
- return llvm::ConstantExpr::getFPCast(C, destType);
+ return 0;
}
llvm_unreachable("Invalid CastKind");
}
@@ -675,12 +759,13 @@ public:
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
unsigned NumInitElements = ILE->getNumInits();
- if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
+ if (NumInitElements == 1 &&
+ CGM.getContext().hasSameUnqualifiedType(ILE->getType(),
+ ILE->getInit(0)->getType()) &&
(isa<StringLiteral>(ILE->getInit(0)) ||
isa<ObjCEncodeExpr>(ILE->getInit(0))))
return Visit(ILE->getInit(0));
- std::vector<llvm::Constant*> Elts;
llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
llvm::Type *ElemTy = AType->getElementType();
@@ -691,9 +776,11 @@ public:
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
// Copy initializer elements.
- unsigned i = 0;
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumInitableElts + NumElements);
+
bool RewriteType = false;
- for (; i < NumInitableElts; ++i) {
+ for (unsigned i = 0; i < NumInitableElts; ++i) {
Expr *Init = ILE->getInit(i);
llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
if (!C)
@@ -712,13 +799,13 @@ public:
if (!fillC)
return 0;
RewriteType |= (fillC->getType() != ElemTy);
- for (; i < NumElements; ++i)
- Elts.push_back(fillC);
+ Elts.resize(NumElements, fillC);
if (RewriteType) {
// FIXME: Try to avoid packing the array
std::vector<llvm::Type*> Types;
- for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.reserve(NumInitableElts + NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
Types.push_back(Elts[i]->getType());
llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
Types, true);
@@ -741,31 +828,6 @@ public:
}
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
- if (ILE->getType()->isAnyComplexType() && ILE->getNumInits() == 2) {
- // Complex type with element initializers
- Expr *Real = ILE->getInit(0);
- Expr *Imag = ILE->getInit(1);
- llvm::Constant *Complex[2];
- Complex[0] = CGM.EmitConstantExpr(Real, Real->getType(), CGF);
- if (!Complex[0])
- return 0;
- Complex[1] = CGM.EmitConstantExpr(Imag, Imag->getType(), CGF);
- if (!Complex[1])
- return 0;
- llvm::StructType *STy =
- cast<llvm::StructType>(ConvertType(ILE->getType()));
- return llvm::ConstantStruct::get(STy, Complex);
- }
-
- if (ILE->getType()->isScalarType()) {
- // We have a scalar in braces. Just use the first element.
- if (ILE->getNumInits() > 0) {
- Expr *Init = ILE->getInit(0);
- return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
- }
- return CGM.EmitNullConstant(ILE->getType());
- }
-
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -775,11 +837,7 @@ public:
if (ILE->getType()->isUnionType())
return EmitUnionInitialization(ILE);
- // If ILE was a constant vector, we would have handled it already.
- if (ILE->getType()->isVectorType())
- return 0;
-
- llvm_unreachable("Unable to handle InitListExpr");
+ return 0;
}
llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -817,13 +875,7 @@ public:
}
llvm::Constant *VisitStringLiteral(StringLiteral *E) {
- assert(!E->getType()->isPointerType() && "Strings are always arrays");
-
- // This must be a string initializing an array in a static initializer.
- // Don't emit it as the address of the string, emit the string data itself
- // as an inline array.
- return llvm::ConstantArray::get(VMContext,
- CGM.GetStringForStringLiteral(E), false);
+ return CGM.GetConstantArrayFromStringLiteral(E);
}
llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
@@ -837,7 +889,7 @@ public:
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
Str.resize(CAT->getSize().getZExtValue(), '\0');
- return llvm::ConstantArray::get(VMContext, Str, false);
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
@@ -850,25 +902,8 @@ public:
}
public:
- llvm::Constant *EmitLValue(Expr *E) {
- switch (E->getStmtClass()) {
- default: break;
- case Expr::CompoundLiteralExprClass: {
- // Note that due to the nature of compound literals, this is guaranteed
- // to be the only use of the variable, so we just generate it here.
- CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
- llvm::Constant* C = Visit(CLE->getInitializer());
- // FIXME: "Leaked" on failure.
- if (C)
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
- E->getType().isConstant(CGM.getContext()),
- llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", 0, false,
- CGM.getContext().getTargetAddressSpace(E->getType()));
- return C;
- }
- case Expr::DeclRefExprClass: {
- ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
if (Decl->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(Decl);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
@@ -884,7 +919,26 @@ public:
}
}
}
- break;
+ return 0;
+ }
+
+ Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
+ CLE->getType(), CGF);
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ CGM.getContext().getTargetAddressSpace(E->getType()));
+ return C;
}
case Expr::StringLiteralClass:
return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
@@ -915,7 +969,7 @@ public:
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
- unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+ unsigned builtin = CE->isBuiltinCall();
if (builtin !=
Builtin::BI__builtin___CFStringMakeConstantString &&
builtin !=
@@ -939,6 +993,15 @@ public:
return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
}
+ case Expr::CXXTypeidExprClass: {
+ CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
+ QualType T;
+ if (Typeid->isTypeOperand())
+ T = Typeid->getTypeOperand();
+ else
+ T = Typeid->getExprOperand()->getType();
+ return CGM.GetAddrOfRTTIDescriptor(T);
+ }
}
return 0;
@@ -947,6 +1010,31 @@ public:
} // end anonymous namespace.
+llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF) {
+ if (const APValue *Value = D.evaluateValue())
+ return EmitConstantValueForMemory(*Value, D.getType(), CGF);
+
+ // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
+ // reference is a constant expression, and the reference binds to a temporary,
+ // then constant initialization is performed. ConstExprEmitter will
+ // incorrectly emit a prvalue constant in this case, and the calling code
+ // interprets that as the (pointer) value of the reference, rather than the
+ // desired value of the referee.
+ if (D.getType()->isReferenceType())
+ return 0;
+
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
QualType DestType,
CodeGenFunction *CGF) {
@@ -957,142 +1045,207 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
if (DestType->isReferenceType())
Success = E->EvaluateAsLValue(Result, Context);
else
- Success = E->Evaluate(Result, Context);
-
- if (Success && !Result.HasSideEffects) {
- switch (Result.Val.getKind()) {
- case APValue::Uninitialized:
- llvm_unreachable("Constant expressions should be initialized.");
- case APValue::LValue: {
- llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
- llvm::Constant *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- Result.Val.getLValueOffset().getQuantity());
-
- llvm::Constant *C;
- if (const Expr *LVBase = Result.Val.getLValueBase()) {
- C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
-
- // Apply offset if necessary.
- if (!Offset->isNullValue()) {
- llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
- llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
- Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
- C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
- }
+ Success = E->EvaluateAsRValue(Result, Context);
- // Convert to the appropriate type; this could be an lvalue for
- // an integer.
- if (isa<llvm::PointerType>(DestTy))
- return llvm::ConstantExpr::getBitCast(C, DestTy);
+ llvm::Constant *C = 0;
+ if (Success && !Result.HasSideEffects)
+ C = EmitConstantValue(Result.Val, DestType, CGF);
+ else
+ C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- return llvm::ConstantExpr::getPtrToInt(C, DestTy);
- } else {
- C = Offset;
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
- // Convert to the appropriate type; this could be an lvalue for
- // an integer.
- if (isa<llvm::PointerType>(DestTy))
- return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::LValue: {
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (APValue::LValueBase LVBase = Value.getLValueBase()) {
+ // An array can be represented as an lvalue referring to the base.
+ if (isa<llvm::ArrayType>(DestTy)) {
+ assert(Offset->isNullValue() && "offset on array initializer");
+ return ConstExprEmitter(*this, CGF).Visit(
+ const_cast<Expr*>(LVBase.get<const Expr*>()));
+ }
- // If the types don't match this should only be a truncate.
- if (C->getType() != DestTy)
- return llvm::ConstantExpr::getTrunc(C, DestTy);
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
- return C;
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
}
- }
- case APValue::Int: {
- llvm::Constant *C = llvm::ConstantInt::get(VMContext,
- Result.Val.getInt());
- if (C->getType()->isIntegerTy(1)) {
- llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
- C = llvm::ConstantExpr::getZExt(C, BoolTy);
- }
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
return C;
}
- case APValue::ComplexInt: {
- llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantInt::get(VMContext,
- Result.Val.getComplexIntReal());
- Complex[1] = llvm::ConstantInt::get(VMContext,
- Result.Val.getComplexIntImag());
-
- // FIXME: the target may want to specify that this is packed.
- llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
- Complex[1]->getType(),
- NULL);
- return llvm::ConstantStruct::get(STy, Complex);
- }
- case APValue::Float: {
- const llvm::APFloat &Init = Result.Val.getFloat();
- if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
- return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ }
+ case APValue::Int:
+ return llvm::ConstantInt::get(VMContext, Value.getInt());
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Float: {
+ const llvm::APFloat &Init = Value.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Vector: {
+ SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Value.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ const APValue &Elt = Value.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
else
- return llvm::ConstantFP::get(VMContext, Init);
- }
- case APValue::ComplexFloat: {
- llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantFP::get(VMContext,
- Result.Val.getComplexFloatReal());
- Complex[1] = llvm::ConstantFP::get(VMContext,
- Result.Val.getComplexFloatImag());
-
- // FIXME: the target may want to specify that this is packed.
- llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
- Complex[1]->getType(),
- NULL);
- return llvm::ConstantStruct::get(STy, Complex);
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
}
- case APValue::Vector: {
- SmallVector<llvm::Constant *, 4> Inits;
- unsigned NumElts = Result.Val.getVectorLength();
-
- if (Context.getLangOptions().AltiVec &&
- isa<CastExpr>(E) &&
- cast<CastExpr>(E)->getCastKind() == CK_VectorSplat) {
- // AltiVec vector initialization with a single literal
- APValue &Elt = Result.Val.getVectorElt(0);
-
- llvm::Constant* InitValue = Elt.isInt()
- ? cast<llvm::Constant>
- (llvm::ConstantInt::get(VMContext, Elt.getInt()))
- : cast<llvm::Constant>
- (llvm::ConstantFP::get(VMContext, Elt.getFloat()));
-
- for (unsigned i = 0; i != NumElts; ++i)
- Inits.push_back(InitValue);
+ return llvm::ConstantVector::get(Inits);
+ }
+ case APValue::AddrLabelDiff: {
+ const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
+ const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
+ llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
+ llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
+
+    // Compute the difference between the two label addresses.
+ llvm::Type *ResultType = getTypes().ConvertType(DestType);
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
+ llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
+
+ // LLVM is a bit sensitive about the exact format of the
+ // address-of-label difference; make sure to truncate after
+ // the subtraction.
+ return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
+ }
+ case APValue::Struct:
+ case APValue::Union:
+ return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
+ case APValue::Array: {
+ const ArrayType *CAT = Context.getAsArrayType(DestType);
+ unsigned NumElements = Value.getArraySize();
+ unsigned NumInitElts = Value.getArrayInitializedElts();
- } else {
- for (unsigned i = 0; i != NumElts; ++i) {
- APValue &Elt = Result.Val.getVectorElt(i);
- if (Elt.isInt())
- Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
- else
- Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
- }
- }
- return llvm::ConstantVector::get(Inits);
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
+
+ // Emit array filler, if there is one.
+ llvm::Constant *Filler = 0;
+ if (Value.hasArrayFiller())
+ Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
+ CAT->getElementType(), CGF);
+
+ // Emit initializer elements.
+ llvm::Type *CommonElementType = 0;
+ for (unsigned I = 0; I < NumElements; ++I) {
+ llvm::Constant *C = Filler;
+ if (I < NumInitElts)
+ C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
+ CAT->getElementType(), CGF);
+ if (I == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = 0;
+ Elts.push_back(C);
}
+
+ if (!CommonElementType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
}
+
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+ case APValue::MemberPointer:
+ return getCXXABI().EmitMemberPointer(Value, DestType);
}
+ llvm_unreachable("Unknown APValue kind");
+}
- llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- if (C && C->getType()->isIntegerTy(1)) {
- llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+llvm::Constant *
+CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
+ if (C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
}
-static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
- const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
- return layout.getFieldOffset(field->getFieldIndex());
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
+ assert(E->isFileScope() && "not a file-scope compound literal expr");
+ return ConstExprEmitter(*this, 0).EmitLValue(E);
}
-
+
llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
// Member pointer constants always have a very particular form.
@@ -1104,25 +1257,14 @@ CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
return getCXXABI().EmitMemberPointer(method);
// Otherwise, a member data pointer.
- uint64_t fieldOffset;
- if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
- fieldOffset = getFieldOffset(getContext(), field);
- else {
- const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);
-
- fieldOffset = 0;
- for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
- ce = ifield->chain_end(); ci != ce; ++ci)
- fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
- }
-
+ uint64_t fieldOffset = getContext().getFieldOffset(decl);
CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
return getCXXABI().EmitMemberDataPointer(type, chars);
}
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
- std::vector<llvm::Constant *> &Elements,
+ SmallVectorImpl<llvm::Constant *> &Elements,
uint64_t StartOffset) {
assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
"StartOffset not byte aligned!");
@@ -1189,8 +1331,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
// FIXME: hardcodes Itanium member pointer representation!
llvm::Constant *NegativeOne =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
- -1ULL, /*isSigned*/true);
+ llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
// Fill in the null data member pointer.
for (CharUnits I = StartIndex; I != EndIndex; ++I)
@@ -1238,13 +1379,17 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
for (RecordDecl::field_iterator I = record->field_begin(),
E = record->field_end(); I != E; ++I) {
const FieldDecl *field = *I;
-
- // Ignore bit fields.
- if (field->isBitField())
- continue;
-
- unsigned fieldIndex = layout.getLLVMFieldNo(field);
- elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+
+ // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
+ // will fill in later.)
+ if (!field->isBitField()) {
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // For unions, stop after the first named field.
+ if (record->isUnion() && field->getDeclName())
+ break;
}
// Fill in the virtual bases, if we're working with the complete object.
@@ -1299,14 +1444,13 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
unsigned numBaseElements = baseArrayType->getNumElements();
// Fill in null data member pointers.
- std::vector<llvm::Constant *> baseElements(numBaseElements);
+ SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
baseElements, 0);
// Now go through all other elements and zero them out.
if (numBaseElements) {
- llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
for (unsigned i = 0; i != numBaseElements; ++i) {
if (!baseElements[i])
baseElements[i] = i8_zero;
@@ -1321,17 +1465,18 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+ llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
QualType ElementTy = CAT->getElementType();
llvm::Constant *Element = EmitNullConstant(ElementTy);
unsigned NumElements = CAT->getSize().getZExtValue();
- std::vector<llvm::Constant *> Array(NumElements);
- for (unsigned i = 0; i != NumElements; ++i)
- Array[i] = Element;
-
- llvm::ArrayType *ATy =
- cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+
+ if (Element->isNullValue())
+ return llvm::ConstantAggregateZero::get(ATy);
+
+ SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
return llvm::ConstantArray::get(ATy, Array);
}
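
The rewritten array case avoids materializing NumElements identical constants when the element's null constant is all zeros; only element types whose null value has a nonzero bit pattern (notably Itanium data member pointers, which are null as -1) still get an explicit element list. A sketch of that branch as a hypothetical free-standing helper, reusing the hunk's names:

    static llvm::Constant *emitNullArrayConstant(llvm::ArrayType *ATy,
                                                 llvm::Constant *Element,
                                                 unsigned NumElements) {
      // All-zero element: one compact ConstantAggregateZero node suffices.
      if (Element->isNullValue())
        return llvm::ConstantAggregateZero::get(ATy);
      // Nonzero null pattern (e.g. -1 member pointers): spell out the copies.
      SmallVector<llvm::Constant *, 8> Elts(NumElements, Element);
      return llvm::ConstantArray::get(ATy, Elts);
    }
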
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 3a9fbeed9d69..18891f7492a2 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -177,6 +177,9 @@ public:
Value *VisitCharacterLiteral(const CharacterLiteral *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
+ Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
@@ -197,6 +200,10 @@ public:
return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
}
+ Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getScalarVal();
+ }
+
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
@@ -204,33 +211,17 @@ public:
// Otherwise, assume the mapping is the scalar directly.
return CGF.getOpaqueRValueMapping(E).getScalarVal();
}
-
+
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
- Expr::EvalResult Result;
- if (!E->Evaluate(Result, CGF.getContext()))
- return EmitLoadOfLValue(E);
-
- assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
-
- llvm::Constant *C;
- if (Result.Val.isInt())
- C = Builder.getInt(Result.Val.getInt());
- else if (Result.Val.isFloat())
- C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
- else
- return EmitLoadOfLValue(E);
-
- // Make sure we emit a debug reference to the global variable.
- if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
- if (!CGF.getContext().DeclMustBeEmitted(VD))
- CGF.EmitDeclRefExprDbgValue(E, C);
- } else if (isa<EnumConstantDecl>(E->getDecl())) {
- CGF.EmitDeclRefExprDbgValue(E, C);
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return result.getValue();
}
-
- return C;
+ return EmitLoadOfLValue(E);
}
+
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
return CGF.EmitObjCSelectorExpr(E);
}
@@ -240,11 +231,6 @@ public:
Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
- Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- assert(E->getObjectKind() == OK_Ordinary &&
- "reached property reference without lvalue-to-rvalue");
- return EmitLoadOfLValue(E);
- }
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
if (E->getMethodDecl() &&
E->getMethodDecl()->getResultType()->isReferenceType())
@@ -287,8 +273,6 @@ public:
Value *VisitStmtExpr(const StmtExpr *E);
- Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
-
// Unary Operators.
Value *VisitUnaryPostDec(const UnaryOperator *E) {
LValue LV = EmitLValue(E->getSubExpr());
@@ -354,7 +338,9 @@ public:
}
Value *VisitExprWithCleanups(ExprWithCleanups *E) {
- return CGF.EmitExprWithCleanups(E).getScalarVal();
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
}
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
@@ -405,7 +391,7 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
case LangOptions::SOB_Defined:
@@ -420,7 +406,7 @@ public:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
bool isTrapvOverflowBehavior() {
- return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
+ return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
== LangOptions::SOB_Trapping;
}
/// Create a binary op that checks for overflow.
@@ -512,6 +498,15 @@ public:
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
+ Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return CGF.EmitObjCNumericLiteral(E);
+ }
+ Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ return CGF.EmitObjCArrayLiteral(E);
+ }
+ Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ return CGF.EmitObjCDictionaryLiteral(E);
+ }
Value *VisitAsTypeExpr(AsTypeExpr *CE);
Value *VisitAtomicExpr(AtomicExpr *AE);
};
@@ -559,7 +554,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (SrcType->isHalfType()) {
Src = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src);
SrcType = CGF.getContext().FloatTy;
- SrcTy = llvm::Type::getFloatTy(VMContext);
+ SrcTy = CGF.FloatTy;
}
// Handle conversions to bool first, they are special: comparisons against 0.
@@ -609,12 +604,9 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
- for (unsigned i = 0; i != NumElements; ++i)
- Args.push_back(Builder.getInt32(0));
-
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements,
+ Builder.getInt32(0));
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
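
This hunk, like the matching changes in VisitShuffleVectorExpr and the CK_VectorSplat case further down, swaps a hand-rolled push_back loop for ConstantVector::getSplat, which builds the same N-element constant in one call. Side by side, assuming an IRBuilder named Builder:

    unsigned NumElements = 4;                         // example width
    llvm::Constant *Zero = Builder.getInt32(0);
    // Before: build the <4 x i32> zero mask element by element.
    SmallVector<llvm::Constant *, 16> Args(NumElements, Zero);
    llvm::Constant *Mask = llvm::ConstantVector::get(Args);
    // After: one call yields the identical (uniqued) constant.
    Mask = llvm::ConstantVector::getSplat(NumElements, Zero);
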
@@ -630,7 +622,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Cast to half via float
if (DstType->isHalfType())
- DstTy = llvm::Type::getFloatTy(VMContext);
+ DstTy = CGF.FloatTy;
if (isa<llvm::IntegerType>(SrcTy)) {
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
@@ -748,11 +740,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
(1 << llvm::Log2_32(LHSElts))-1);
// Mask off the high bits of each shuffle index.
- SmallVector<llvm::Constant *, 32> MaskV;
- for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
- MaskV.push_back(EltMask);
-
- Value* MaskBits = llvm::ConstantVector::get(MaskV);
+ Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
+ EltMask);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
// newv = undef
@@ -765,8 +754,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
- Value *Indx = Builder.getInt32(i);
- Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
+ Value *IIndx = Builder.getInt32(i);
+ Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
// Handle vec3 special since the index will be off by one for the RHS.
@@ -778,7 +767,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
}
Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
- NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
+ NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
}
return NewV;
}
@@ -800,13 +789,13 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
- Expr::EvalResult Result;
- if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
if (E->isArrow())
CGF.EmitScalarExpr(E->getBase());
else
EmitLValue(E->getBase());
- return Builder.getInt(Result.Val.getInt());
+ return Builder.getInt(Value);
}
// Emit debug info for aggregate now, if it was delayed to reduce
@@ -900,8 +889,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (CurIdx == 0) {
// insert into undef -> shuffle (src, undef)
Args.push_back(C);
- for (unsigned j = 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
LHS = EI->getVectorOperand();
RHS = V;
@@ -912,9 +900,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned j = 0; j != CurIdx; ++j)
Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
- for (unsigned j = CurIdx + 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
-
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
VIsUndefShuffle = false;
@@ -958,8 +945,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
- for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -973,8 +959,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
Args.push_back(Builder.getInt32(j));
- for (unsigned j = InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
@@ -984,8 +969,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(Builder.getInt32(j));
for (unsigned j = 0; j != InitElts; ++j)
Args.push_back(Builder.getInt32(j+Offset));
- for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
@@ -1053,7 +1037,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *V = EmitLValue(E).getAddress();
V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy));
}
case CK_CPointerToObjCPointerCast:
@@ -1063,6 +1047,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *Src = Visit(const_cast<Expr*>(E));
return Builder.CreateBitCast(Src, ConvertType(DestTy));
}
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_UserDefinedConversion:
return Visit(const_cast<Expr*>(E));
@@ -1130,6 +1116,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
}
+ case CK_ReinterpretMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer: {
Value *Src = Visit(E);
@@ -1155,6 +1142,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_ARCExtendBlockObject:
return CGF.EmitARCExtendBlockObject(E);
+ case CK_CopyAndAutoreleaseBlockObject:
+ return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -1164,15 +1154,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_ConstructorConversion:
case CK_ToUnion:
llvm_unreachable("scalar cast to non-scalar value");
- break;
-
- case CK_GetObjCProperty: {
- assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
- assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
- "CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
- RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E));
- return RV.getScalarVal();
- }
case CK_LValueToRValue:
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
@@ -1202,6 +1183,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
+ Elt = EmitScalarConversion(Elt, E->getType(),
+ DestTy->getAs<VectorType>()->getElementType());
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
@@ -1209,13 +1192,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
llvm::Constant *Zero = Builder.getInt32(0);
- for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(Zero);
-
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, Zero);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
@@ -1252,7 +1231,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
llvm_unreachable("unknown scalar cast");
- return 0;
}
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
@@ -1261,11 +1239,6 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
.getScalarVal();
}
-Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- LValue LV = CGF.EmitBlockDeclRefLValue(E);
- return CGF.EmitLoadOfLValue(LV).getScalarVal();
-}
-
//===----------------------------------------------------------------------===//
// Unary Operators
//===----------------------------------------------------------------------===//
@@ -1274,13 +1247,11 @@ llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *InVal,
llvm::Value *NextVal, bool IsInc) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
- break;
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
- break;
case LangOptions::SOB_Trapping:
BinOpInfo BinOp;
BinOp.LHS = InVal;
@@ -1300,9 +1271,21 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
QualType type = E->getSubExpr()->getType();
llvm::Value *value = EmitLoadOfLValue(LV);
llvm::Value *input = value;
+ llvm::PHINode *atomicPHI = 0;
int amount = (isInc ? 1 : -1);
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ type = atomicTy->getValueType();
+ value = atomicPHI;
+ }
+
// Special case of integer increment that we have to check first: bool++.
// Due to promotion rules, we get:
// bool++ -> bool = bool + 1
@@ -1336,7 +1319,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
@@ -1346,7 +1329,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
@@ -1355,7 +1338,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
@@ -1416,12 +1399,24 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+ value, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return isPre ? value : input;
+ }
// Store the updated result through the lvalue.
if (LV.isBitField())
@@ -1459,6 +1454,15 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
}
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+
+ // Perform vector logical not on comparison with zero vector.
+ if (E->getType()->isExtVectorType()) {
+ Value *Oper = Visit(E->getSubExpr());
+ Value *Zero = llvm::Constant::getNullValue(Oper->getType());
+ Value *Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+ }
+
// Compare operand to zero.
Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
@@ -1473,9 +1477,9 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Try folding the offsetof to a constant.
- Expr::EvalResult EvalResult;
- if (E->Evaluate(EvalResult, CGF.getContext()))
- return Builder.getInt(EvalResult.Val.getInt());
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext()))
+ return Builder.getInt(Value);
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
@@ -1596,9 +1600,7 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
// If this isn't sizeof(vla), the result must be constant; use the constant
// folding logic so we don't have to duplicate it here.
- Expr::EvalResult Result;
- E->Evaluate(Result, CGF.getContext());
- return Builder.getInt(Result.Val.getInt());
+ return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
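
Both replacements above are instances of the newer Expr evaluation API: EvaluateAsInt folds an expression to an APSInt or fails cleanly, while EvaluateKnownConstInt asserts that folding must succeed, which is appropriate when the expression is known constant (sizeof of a non-VLA type here). A sketch of the idiom, with foldToInt a hypothetical helper:

    // Hypothetical helper contrasting the two entry points used above.
    static llvm::Value *foldToInt(const Expr *E, ASTContext &Ctx,
                                  CGBuilderTy &Builder, bool KnownConstant) {
      if (KnownConstant)                   // e.g. sizeof of a non-VLA type
        return Builder.getInt(E->EvaluateKnownConstInt(Ctx));
      llvm::APSInt Value;
      if (E->EvaluateAsInt(Value, Ctx))    // folds, or fails without asserting
        return Builder.getInt(Value);
      return 0;                            // caller falls back to a load
    }
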
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
@@ -1632,7 +1634,10 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
- CGF.EmitScalarExpr(Op, true);
+ if (Op->isGLValue())
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
@@ -1679,12 +1684,38 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
+
+ llvm::PHINode *atomicPHI = 0;
+ if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.Ty = atomicTy->getValueType();
+ OpInfo.LHS = atomicPHI;
+ }
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+ Result, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
// Store the result value into the LHS lvalue. Bit-fields are handled
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
@@ -1709,11 +1740,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return RHS;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1772,8 +1799,18 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Builder.SetInsertPoint(DivCont);
}
}
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (CGF.getContext().getLangOpts().OpenCL) {
+      // OpenCL 1.1 §7.4: minimum accuracy of single-precision '/' is 2.5 ulp.
+ llvm::Type *ValTy = Val->getType();
+ if (ValTy->isFloatTy() ||
+ (isa<llvm::VectorType>(ValTy) &&
+ cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
+ CGF.SetFPAccuracy(Val, 2.5);
+ }
+ return Val;
+ }
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
@@ -1817,7 +1854,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
break;
default:
llvm_unreachable("Unsupported operation for overflow detection");
- IID = 0;
}
OpID <<= 1;
OpID |= 1;
@@ -1841,7 +1877,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Handle overflow with llvm.trap.
const std::string *handlerName =
- &CGF.getContext().getLangOptions().OverflowHandler;
+ &CGF.getContext().getLangOpts().OverflowHandler;
if (handlerName->empty()) {
EmitOverflowBB(overflowBB);
Builder.SetInsertPoint(continueBB);
@@ -1853,7 +1889,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.SetInsertPoint(overflowBB);
// Get the overflow handler.
- llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Int8Ty = CGF.Int8Ty;
llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
@@ -1940,7 +1976,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- if (CGF.getLangOptions().isSignedOverflowDefined()) {
+ if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
} else {
@@ -1959,7 +1995,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
- if (CGF.getLangOptions().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
@@ -1971,7 +2007,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
case LangOptions::SOB_Defined:
@@ -1991,7 +2027,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Defined:
@@ -2117,36 +2153,28 @@ static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
case BuiltinType::UChar:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
- break;
case BuiltinType::Char_S:
case BuiltinType::SChar:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
- break;
case BuiltinType::UShort:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
- break;
case BuiltinType::Short:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
- break;
case BuiltinType::UInt:
case BuiltinType::ULong:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
- break;
case BuiltinType::Int:
case BuiltinType::Long:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
- break;
case BuiltinType::Float:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
- break;
}
- return llvm::Intrinsic::not_intrinsic;
}
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
@@ -2325,11 +2353,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return RHS;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -2341,6 +2365,18 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+
+ // Perform vector logical and on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *And = Builder.CreateAnd(LHS, RHS);
+ return Builder.CreateSExt(And, Zero->getType(), "sext");
+ }
+
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
@@ -2396,6 +2432,18 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+
+ // Perform vector logical or on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *Or = Builder.CreateOr(LHS, RHS);
+ return Builder.CreateSExt(Or, Zero->getType(), "sext");
+ }
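
The vector paths added to VisitBinLAnd and VisitBinLOr share one lowering: compare each operand elementwise against a zero vector, combine the resulting i1 vectors with a plain and/or, then sign-extend so true lanes come back as all-ones in the original element width. Condensed into one hypothetical helper, assuming same-typed vector operands:

    static llvm::Value *emitVectorLogicalOp(CGBuilderTy &Builder,
                                            llvm::Value *LHS, llvm::Value *RHS,
                                            bool IsAnd) {
      llvm::Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
      llvm::Value *L = Builder.CreateICmpNE(LHS, Zero, "cmp");  // <N x i1>
      llvm::Value *R = Builder.CreateICmpNE(RHS, Zero, "cmp");
      llvm::Value *Op = IsAnd ? Builder.CreateAnd(L, R)
                              : Builder.CreateOr(L, R);
      // Sign-extend so each true lane is all-ones at the source element width.
      return Builder.CreateSExt(Op, LHS->getType(), "sext");
    }
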
+
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
@@ -2503,16 +2551,23 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
Expr *live = lhsExpr, *dead = rhsExpr;
if (!CondExprBool) std::swap(live, dead);
- // If the dead side doesn't have labels we need, and if the Live side isn't
- // the gnu missing ?: extension (which we could handle, but don't bother
- // to), just emit the Live part.
- if (!CGF.ContainsLabel(dead))
- return Visit(live);
+ // If the dead side doesn't have labels we need, just emit the Live part.
+ if (!CGF.ContainsLabel(dead)) {
+ Value *Result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value*. However, a conditional
+ // with non-void type must return a non-null Value*.
+ if (!Result && !E->getType()->isVoidType())
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+
+ return Result;
+ }
}
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getContext().getLangOptions().OpenCL
+ if (CGF.getContext().getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
@@ -2524,11 +2579,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
unsigned numElem = vecTy->getNumElements();
llvm::Type *elemType = vecTy->getElementType();
- std::vector<llvm::Constant*> Zvals;
- for (unsigned i = 0; i < numElem; ++i)
- Zvals.push_back(llvm::ConstantInt::get(elemType, 0));
-
- llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
+ llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
llvm::Value *tmp = Builder.CreateSExt(TestMSB,
llvm::VectorType::get(elemType,
@@ -2564,6 +2615,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
+ if (!LHS) {
+ // If the conditional has void type, make sure we return a null Value*.
+ assert(!RHS && "LHS and RHS types must match");
+ return 0;
+ }
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
@@ -2661,8 +2717,7 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Args.push_back(Builder.getInt32(2));
if (numElementsDst == 4)
- Args.push_back(llvm::UndefValue::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext())));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
@@ -2733,11 +2788,11 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
Expr *BaseExpr = E->getBase();
if (BaseExpr->isRValue()) {
- V = CreateTempAlloca(ClassPtrTy, "resval");
+ V = CreateMemTemp(E->getType(), "resval");
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeAddrLValue(V, E->getType()));
+ MakeNaturalAlignAddrLValue(V, E->getType()));
} else {
if (E->isArrow())
V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
@@ -2748,7 +2803,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
// build Class* type
ClassPtrTy = ClassPtrTy->getPointerTo();
V = Builder.CreateBitCast(V, ClassPtrTy);
- return MakeAddrLValue(V, E->getType());
+ return MakeNaturalAlignAddrLValue(V, E->getType());
}
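
The atomic paths added in this file, for ++/-- in EmitScalarPrePostIncDec and for compound assignments in EmitCompoundAssignLValue, share one control-flow skeleton: branch into a loop block headed by a PHI over the current value, compute the update non-atomically, then attempt a sequentially consistent compare-exchange; on failure the observed value feeds back into the PHI and the loop retries. A sketch of that skeleton, assuming addr and value hold the lvalue's address and initially loaded value, with computeUpdate standing in for the operation being made atomic (this era's CreateAtomicCmpXchg returns the old value directly):

    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    llvm::PHINode *phi = Builder.CreatePHI(value->getType(), 2);
    phi->addIncoming(value, startBB);            // first try: the initial load
    llvm::Value *desired = computeUpdate(phi);   // non-atomic computation
    llvm::Value *old = Builder.CreateAtomicCmpXchg(addr, phi, desired,
                                                   llvm::SequentiallyConsistent);
    phi->addIncoming(old, opBB);                 // retry with observed value
    llvm::Value *success = Builder.CreateICmpEQ(old, phi);
    Builder.CreateCondBr(success, contBB, opBB);
    Builder.SetInsertPoint(contBB);
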
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 51f20534d116..d0aa0f5567a5 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -29,6 +29,10 @@ using namespace CodeGen;
typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result);
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
@@ -47,6 +51,140 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
+/// EmitObjCNumericLiteral - This routine generates code for
+/// the appropriate +[NSNumber numberWith<Type>:] method.
+///
+llvm::Value *
+CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ // Generate the correct selector for this literal's concrete type.
+ const Expr *NL = E->getNumber();
+ // Get the method.
+ const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
+ assert(Method && "NSNumber method is null");
+ Selector Sel = Method->getSelector();
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ QualType ResultType = E->getType(); // should be NSNumber *
+ const ObjCObjectPointerType *InterfacePointerType =
+ ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *NSNumberDecl =
+ InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
+
+ const ParmVarDecl *argDecl = *Method->param_begin();
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ RValue RV = EmitAnyExpr(NL);
+ CallArgList Args;
+ Args.add(RV, ArgQT);
+
+ RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, Sel, Receiver, Args,
+ NSNumberDecl, Method);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects) {
+ ASTContext &Context = CGM.getContext();
+ const ObjCDictionaryLiteral *DLE = 0;
+ const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
+ if (!ALE)
+ DLE = cast<ObjCDictionaryLiteral>(E);
+
+ // Compute the type of the array we're initializing.
+ uint64_t NumElements =
+ ALE ? ALE->getNumElements() : DLE->getNumElements();
+ llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
+ NumElements);
+ QualType ElementType = Context.getObjCIdType().withConst();
+ QualType ElementArrayType
+ = Context.getConstantArrayType(ElementType, APNumElements,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+
+ // Allocate the temporary array(s).
+ llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects");
+ llvm::Value *Keys = 0;
+ if (DLE)
+ Keys = CreateMemTemp(ElementArrayType, "keys");
+
+  // Perform the actual initialization of the array(s).
+ for (uint64_t i = 0; i < NumElements; i++) {
+ if (ALE) {
+ // Emit the initializer.
+ const Expr *Rhs = ALE->getElement(i);
+ LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Rhs->getType()),
+ Context);
+ EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
+ } else {
+ // Emit the key initializer.
+ const Expr *Key = DLE->getKeyValueElement(i).Key;
+ LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
+ ElementType,
+ Context.getTypeAlignInChars(Key->getType()),
+ Context);
+ EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);
+
+ // Emit the value initializer.
+ const Expr *Value = DLE->getKeyValueElement(i).Value;
+ LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Value->getType()),
+ Context);
+ EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
+ }
+ }
+
+ // Generate the argument list.
+ CallArgList Args;
+ ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
+ const ParmVarDecl *argDecl = *PI++;
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Objects), ArgQT);
+ if (DLE) {
+ argDecl = *PI++;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Keys), ArgQT);
+ }
+ argDecl = *PI;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ llvm::Value *Count =
+ llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
+ Args.add(RValue::get(Count), ArgQT);
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ Selector Sel = MethodWithObjects->getSelector();
+ QualType ResultType = E->getType();
+ const ObjCObjectPointerType *InterfacePointerType
+ = ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *Class
+ = InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, Class);
+
+ // Generate the message send.
+ RValue result
+ = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ MethodWithObjects->getResultType(),
+ Sel,
+ Receiver, Args, Class,
+ MethodWithObjects);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
+ const ObjCDictionaryLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
+}
+
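
Both literal kinds funnel through EmitObjCCollectionLiteral: the elements are stored into a stack temporary of const id[N] (dictionaries also fill a parallel keys buffer), and the buffers plus an element count are passed to the class method Sema attached to the literal. The lowering behaves roughly as the comments sketch below; the send itself reuses the hunk's names, with CGF standing in for *this:

    // For  @[ a, b ]  the function above acts like:
    //   const id objects[2] = { a, b };
    //   [NSArray arrayWithObjects:objects count:2];
    // For  @{ k : v }  a keys buffer is added:
    //   const id keys[1] = { k }, objects[1] = { v };
    //   [NSDictionary dictionaryWithObjects:objects forKeys:keys count:1];
    RValue result = Runtime.GenerateMessageSend(CGF, ReturnValueSlot(),
                                                ResultType, Sel, Receiver,
                                                Args, Class,
                                                MethodWithObjects);
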
/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
// Untyped selector.
@@ -143,7 +281,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// though.
bool retainSelf =
(!isDelegateInit &&
- CGM.getLangOptions().ObjCAutoRefCount &&
+ CGM.getLangOpts().ObjCAutoRefCount &&
method &&
method->hasAttr<NSConsumesSelfAttr>());
@@ -197,7 +335,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// In ARC, we sometimes want to "extend the lifetime"
// (i.e. retain+autorelease) of receivers of returns-inner-pointer
// messages.
- if (getLangOptions().ObjCAutoRefCount && method &&
+ if (getLangOpts().ObjCAutoRefCount && method &&
method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
shouldExtendReceiverForInnerPointerMessage(E))
Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
@@ -216,7 +354,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// be an undefined read and write of an object in unordered
// expressions.
if (isDelegateInit) {
- assert(getLangOptions().ObjCAutoRefCount &&
+ assert(getLangOpts().ObjCAutoRefCount &&
"delegate init calls should only be marked in ARC");
// Do an unsafe store of null into self.
@@ -307,14 +445,14 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
- const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
args.push_back(OMD->getSelfDecl());
args.push_back(OMD->getCmdDecl());
for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
- E = OMD->param_end(); PI != E; ++PI)
+ E = OMD->param_end(); PI != E; ++PI)
args.push_back(*PI);
CurGD = OMD;
@@ -322,7 +460,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
// In ARC, certain methods get an extra cleanup.
- if (CGM.getLangOptions().ObjCAutoRefCount &&
+ if (CGM.getLangOpts().ObjCAutoRefCount &&
OMD->isInstanceMethod() &&
OMD->getSelector().isUnarySelector()) {
const IdentifierInfo *ident =
@@ -369,8 +507,9 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(Context.VoidTy, args,
- FunctionType::ExtInfo()),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
fn, ReturnValueSlot(), args);
}
@@ -467,13 +606,13 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Handle retain.
if (setterKind == ObjCPropertyDecl::Retain) {
// In GC-only, there's nothing special that needs to be done.
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
// fallthrough
// In ARC, if the property is non-atomic, use expression emission,
// which translates to objc_storeStrong. This isn't required, but
// it's slightly nicer.
- } else if (CGM.getLangOptions().ObjCAutoRefCount && !IsAtomic) {
+ } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
Kind = Expression;
return;
@@ -508,14 +647,14 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// expressions. This actually works out to being atomic anyway,
// except for ARC __strong, but that should trigger the above code.
if (ivarType.hasNonTrivialObjCLifetime() ||
- (CGM.getLangOptions().getGC() &&
+ (CGM.getLangOpts().getGC() &&
CGM.getContext().getObjCGCAttrKind(ivarType))) {
Kind = Expression;
return;
}
// Compute whether the ivar has strong members.
- if (CGM.getLangOptions().getGC())
+ if (CGM.getLangOpts().getGC())
if (const RecordType *recordType = ivarType->getAs<RecordType>())
HasStrong = recordType->getDecl()->hasObjectMember();
@@ -564,12 +703,14 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicGetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCGetterBody(IMP, PID);
+ generateObjCGetterBody(IMP, PID, AtomicHelperFn);
FinishFunction();
}
@@ -597,14 +738,53 @@ static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
return false;
}
+/// emitCPPObjectAtomicGetterCall - Call the runtime function to
+/// copy the ivar into the return slot.
+static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
+ llvm::Value *returnAddr,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
+ // AtomicHelperFn);
+ CallArgList args;
+
+  // The first argument is the return slot.
+ args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
+
+  // The second argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+  // The third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
- ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
- /*nrvo*/ 0);
- EmitReturnStmt(ret);
+ if (!AtomicHelperFn) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ 0);
+ EmitReturnStmt(ret);
+    } else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ ivar, AtomicHelperFn);
+ }
return;
}
@@ -670,8 +850,9 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(getTypes().getFunctionInfo(propType, args,
- FunctionType::ExtInfo()),
+ RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
getPropertyFn, ReturnValueSlot(), args);
// We need to fix the type here. Ivars with copy & retain are
@@ -752,7 +933,8 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
- DeclRefExpr argRef(argVar, argVar->getType(), VK_LValue, SourceLocation());
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -770,11 +952,52 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo()),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyStructFn, ReturnValueSlot(), args);
}
+/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
+/// the value from the first formal parameter into the given ivar, using
+/// the C++ API for atomic C++ objects with non-trivial copy assignment.
+static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
+ ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+  // The third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
Expr *setter = PID->getSetterCXXAssignment();
if (!setter) return true;
@@ -799,20 +1022,38 @@ static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
return false;
}
+static bool UseOptimizedSetter(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+ const TargetInfo &Target = CGM.getContext().getTargetInfo();
+
+ if (Target.getPlatformName() != "macosx")
+ return false;
+
+ return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+}
+
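
UseOptimizedSetter gates the GetOptimizedPropertySetFunction path below: when deploying to OS X 10.8 or later without GC, libobjc provides setter entry points that encode the atomic/copy flags in the symbol name, so each call drops the two boolean arguments and, as the marshaling code below shows, passes the new value before the ivar offset. The signatures as assumed here (they are not part of this patch):

    // Assumed libobjc entry points; for illustration only.
    void objc_setProperty_atomic(id self, SEL _cmd, id newValue,
                                 ptrdiff_t offset);
    void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue,
                                    ptrdiff_t offset);
    void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue,
                                      ptrdiff_t offset);
    void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue,
                                         ptrdiff_t offset);
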
void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
// Just use the setter expression if Sema gave us one and it's
- // non-trivial. There's no way to do this atomically.
+ // non-trivial.
if (!hasTrivialSetExpr(propImpl)) {
- EmitStmt(propImpl->getSetterCXXAssignment());
+ if (!AtomicHelperFn)
+      // If non-atomic, the assignment is emitted directly.
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ else
+      // If atomic, the assignment is performed via a locking API.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
+ AtomicHelperFn);
return;
}
- const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
- ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
-
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
@@ -845,13 +1086,28 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Value *setPropertyFn =
- CGM.getObjCRuntime().GetPropertySetFunction();
- if (!setPropertyFn) {
- CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
- return;
+
+ llvm::Value *setOptimizedPropertyFn = 0;
+ llvm::Value *setPropertyFn = 0;
+ if (UseOptimizedSetter(CGM)) {
+      // Deployment target is 10.8 or later and GC is off.
+ setOptimizedPropertyFn =
+ CGM.getObjCRuntime()
+ .GetOptimizedPropertySetFunction(strategy.isAtomic(),
+ strategy.isCopy());
+ if (!setOptimizedPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
+ return;
+ }
}
-
+ else {
+ setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+ }
+
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
llvm::Value *cmd =
@@ -866,17 +1122,28 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
args.add(RValue::get(cmd), getContext().getObjCSelType());
- args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
- args.add(RValue::get(arg), getContext().getObjCIdType());
- args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
- getContext().BoolTy);
- args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
- getContext().BoolTy);
- // FIXME: We shouldn't need to get the function info here, the runtime
- // already should have computed it to build the function.
- EmitCall(getTypes().getFunctionInfo(getContext().VoidTy, args,
- FunctionType::ExtInfo()),
- setPropertyFn, ReturnValueSlot(), args);
+ if (setOptimizedPropertyFn) {
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setOptimizedPropertyFn, ReturnValueSlot(), args);
+ } else {
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setPropertyFn, ReturnValueSlot(), args);
+ }
+
return;
}
@@ -890,7 +1157,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Otherwise, fake up some ASTs and emit a normal assignment.
ValueDecl *selfDecl = setterMethod->getSelfDecl();
- DeclRefExpr self(selfDecl, selfDecl->getType(), VK_LValue, SourceLocation());
+ DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ VK_LValue, SourceLocation());
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
selfDecl->getType(), CK_LValueToRValue, &self,
VK_RValue);
@@ -899,7 +1167,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
ParmVarDecl *argDecl = *setterMethod->param_begin();
QualType argType = argDecl->getType().getNonReferenceType();
- DeclRefExpr arg(argDecl, argType, VK_LValue, SourceLocation());
+ DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
&arg, VK_RValue);
@@ -943,12 +1211,14 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicSetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCSetterBody(IMP, PID);
+ generateObjCSetterBody(IMP, PID, AtomicHelperFn);
FinishFunction();
}
@@ -958,13 +1228,13 @@ namespace {
private:
llvm::Value *addr;
const ObjCIvarDecl *ivar;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : addr(addr), ivar(ivar), destroyer(*destroyer),
+ : addr(addr), ivar(ivar), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -1004,11 +1274,11 @@ static void emitCXXDestructMethod(CodeGenFunction &CGF,
// Use a call to objc_storeStrong to destroy strong ivars, for the
// general benefit of the tools.
if (dtorKind == QualType::DK_objc_strong_lifetime) {
- destroyer = &destroyARCStrongWithStore;
+ destroyer = destroyARCStrongWithStore;
// Otherwise use the default for the destruction kind.
} else {
- destroyer = &CGF.getDestroyer(dtorKind);
+ destroyer = CGF.getDestroyer(dtorKind);
}
CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
@@ -1067,7 +1337,7 @@ bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
}
bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC)
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
return false;
if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
return FDTTy->getDecl()->hasObjectMember();
@@ -1087,117 +1357,6 @@ QualType CodeGenFunction::TypeOfSelfObject() {
return PTy->getPointeeType();
}
-LValue
-CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
- // This is a special l-value that just issues sends when we load or
- // store through it.
-
- // For certain base kinds, we need to emit the base immediately.
- llvm::Value *Base;
- if (E->isSuperReceiver())
- Base = LoadObjCSelf();
- else if (E->isClassReceiver())
- Base = CGM.getObjCRuntime().GetClass(Builder, E->getClassReceiver());
- else
- Base = EmitScalarExpr(E->getBase());
- return LValue::MakePropertyRef(E, Base);
-}
-
-static RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- Selector S,
- llvm::Value *Receiver,
- const CallArgList &CallArgs) {
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CGF.CurFuncDecl);
- bool isClassMessage = OMD->isClassMethod();
- bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGF.CGM.getObjCRuntime()
- .GenerateMessageSendSuper(CGF, Return, ResultType,
- S, OMD->getClassInterface(),
- isCategoryImpl, Receiver,
- isClassMessage, CallArgs);
-}
-
-RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
- ReturnValueSlot Return) {
- const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
- QualType ResultType = E->getGetterResultType();
- Selector S;
- const ObjCMethodDecl *method;
- if (E->isExplicitProperty()) {
- const ObjCPropertyDecl *Property = E->getExplicitProperty();
- S = Property->getGetterName();
- method = Property->getGetterMethodDecl();
- } else {
- method = E->getImplicitPropertyGetter();
- S = method->getSelector();
- }
-
- llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
-
- if (CGM.getLangOptions().ObjCAutoRefCount) {
- QualType receiverType;
- if (E->isSuperReceiver())
- receiverType = E->getSuperReceiverType();
- else if (E->isClassReceiver())
- receiverType = getContext().getObjCClassType();
- else
- receiverType = E->getBase()->getType();
- }
-
- // Accesses to 'super' follow a different code path.
- if (E->isSuperReceiver())
- return AdjustRelatedResultType(*this, E, method,
- GenerateMessageSendSuper(*this, Return,
- ResultType,
- S, Receiver,
- CallArgList()));
- const ObjCInterfaceDecl *ReceiverClass
- = (E->isClassReceiver() ? E->getClassReceiver() : 0);
- return AdjustRelatedResultType(*this, E, method,
- CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass));
-}
-
-void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
- LValue Dst) {
- const ObjCPropertyRefExpr *E = Dst.getPropertyRefExpr();
- Selector S = E->getSetterSelector();
- QualType ArgType = E->getSetterArgType();
-
- // FIXME. Other than scalars, AST is not adequate for setter and
- // getter type mismatches which require conversion.
- if (Src.isScalar()) {
- llvm::Value *SrcVal = Src.getScalarVal();
- QualType DstType = getContext().getCanonicalType(ArgType);
- llvm::Type *DstTy = ConvertType(DstType);
- if (SrcVal->getType() != DstTy)
- Src =
- RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
- }
-
- CallArgList Args;
- Args.add(Src, ArgType);
-
- llvm::Value *Receiver = Dst.getPropertyRefBaseAddr();
- QualType ResultType = getContext().VoidTy;
-
- if (E->isSuperReceiver()) {
- GenerateMessageSendSuper(*this, ReturnValueSlot(),
- ResultType, S, Receiver, Args);
- return;
- }
-
- const ObjCInterfaceDecl *ReceiverClass
- = (E->isClassReceiver() ? E->getClassReceiver() : 0);
-
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- ResultType, S, Receiver, Args,
- ReceiverClass);
-}
-
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
@@ -1243,7 +1402,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Collection = EmitARCRetainScalarExpr(S.getCollection());
// Enter a cleanup to do the release.
@@ -1343,8 +1502,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
- FunctionType::ExtInfo()),
+ EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
EnumerationMutationFn, ReturnValueSlot(), Args2);
// Otherwise, or if the mutation function returns, just continue.
@@ -1360,7 +1520,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitAutoVarInit(variable);
const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
- DeclRefExpr tempDRE(const_cast<VarDecl*>(D), D->getType(),
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
VK_LValue, SourceLocation());
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
@@ -1467,7 +1627,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
// Leave the cleanup we entered in ARC.
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
PopCleanupBlock();
EmitBlock(LoopEnd.getBlock());
@@ -1604,9 +1764,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
== value->getType());
if (!fn) {
- std::vector<llvm::Type*> argTypes(2);
- argTypes[0] = CGF.Int8PtrPtrTy;
- argTypes[1] = CGF.Int8PtrTy;
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
@@ -1720,8 +1878,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
// in a moment.
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
llvm::FunctionType *type =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- /*variadic*/ false);
+ llvm::FunctionType::get(VoidTy, /*variadic*/false);
marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
@@ -1813,7 +1970,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
// lvalue is inadequately aligned.
if (shouldUseFusedARCCalls() &&
!isBlock &&
- !(dst.getAlignment() && dst.getAlignment() < PointerAlignInBytes)) {
+ (dst.getAlignment().isZero() ||
+ dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
@@ -2086,7 +2244,7 @@ namespace {
}
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
else
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
@@ -2109,7 +2267,6 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
}
llvm_unreachable("impossible lifetime!");
- return TryEmitResult();
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
@@ -2138,7 +2295,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
// As a very special optimization, in ARC++, if the l-value is the
// result of a non-volatile assignment, do a simple retain of the
// result of the call to objc_storeWeak instead of reloading.
- if (CGF.getLangOptions().CPlusPlus &&
+ if (CGF.getLangOpts().CPlusPlus &&
!type.isVolatileQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Weak &&
isa<BinaryOperator>(e) &&
@@ -2233,10 +2390,64 @@ static bool shouldEmitSeparateBlockRetain(const Expr *e) {
return true;
}
+/// Try to emit a PseudoObjectExpr at +1.
+///
+/// This massively duplicates emitPseudoObjectRValue.
+static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression.
+ const Expr *resultExpr = E->getResultExpr();
+ assert(resultExpr);
+ TryEmitResult result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+
+ // If this semantic is the result of the pseudo-object
+ // expression, try to evaluate the source as +1.
+ if (ov == resultExpr) {
+ assert(!OVMA::shouldBindAsLValue(ov));
+ result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
+ opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
+
+ // Otherwise, just bind it.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+ }
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ result = tryEmitARCRetainScalarExpr(CGF, semantic);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
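
A sketch of the shape this function walks, with hypothetical names: for an ARC +1 load such as "x = obj.prop", the pseudo-object's semantic form binds the receiver as an opaque value and ends in the getter send, which is the result expression retained above.

    // Illustrative semantic decomposition (all names hypothetical):
    //   PseudoObjectExpr
    //     OpaqueValueExpr ov = obj;   // bound via OVMA::bind, emitted once
    //     result = [ov prop];         // result expr, emitted at +1 via
    //                                 // tryEmitARCRetainScalarExpr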
+
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// Look through cleanups.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ CGF.enterFullExpression(cleanups);
CodeGenFunction::RunCleanupsScope scope(CGF);
return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
}
@@ -2331,12 +2542,6 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
return TryEmitResult(result, true);
}
- case CK_GetObjCProperty: {
- llvm::Value *result = emitARCRetainCall(CGF, ce);
- if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
- return TryEmitResult(result, true);
- }
-
default:
break;
}
@@ -2358,6 +2563,17 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
llvm::Value *result = emitARCRetainCall(CGF, e);
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
+
+ // Look through pseudo-object expressions.
+ } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ TryEmitResult result
+ = tryEmitARCRetainPseudoObject(CGF, pseudo);
+ if (resultType) {
+ llvm::Value *value = result.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ result.setPointer(value);
+ }
+ return result;
}
// Conservatively halt the search at any other expression kind.
@@ -2424,17 +2640,19 @@ llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
// In ARC, retain and autorelease the expression.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
// Do so before running any cleanups for the full-expression.
// tryEmitARCRetainScalarExpr does make an effort to do things
// inside cleanups, but there are crazy cases like
// @throw A().foo;
// where a full retain+autorelease is required and would
// otherwise happen after the destructor for the temporary.
- CodeGenFunction::RunCleanupsScope cleanups(*this);
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr))
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
+ enterFullExpression(ewc);
expr = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
return EmitARCRetainAutoreleaseScalarExpr(expr);
}
@@ -2468,12 +2686,8 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
// If the RHS was emitted retained, expand this.
if (hasImmediateRetain) {
llvm::Value *oldValue =
- EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
- lvalue.getAlignment(), e->getType(),
- lvalue.getTBAAInfo());
- EmitStoreOfScalar(value, lvalue.getAddress(),
- lvalue.isVolatileQualified(), lvalue.getAlignment(),
- e->getType(), lvalue.getTBAAInfo());
+ EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
EmitARCRelease(oldValue, /*precise*/ false);
} else {
value = EmitARCStoreStrong(lvalue, value, ignored);
@@ -2487,15 +2701,13 @@ CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
LValue lvalue = EmitLValue(e->getLHS());
- EmitStoreOfScalar(value, lvalue.getAddress(),
- lvalue.isVolatileQualified(), lvalue.getAlignment(),
- e->getType(), lvalue.getTBAAInfo());
+ EmitStoreOfScalar(value, lvalue);
return std::pair<LValue,llvm::Value*>(lvalue, value);
}
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
- const ObjCAutoreleasePoolStmt &ARPS) {
+ const ObjCAutoreleasePoolStmt &ARPS) {
const Stmt *subStmt = ARPS.getSubStmt();
const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
@@ -2526,7 +2738,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an inline assembly.
llvm::FunctionType *extenderType
- = llvm::FunctionType::get(VoidTy, VoidPtrTy, /*variadic*/ false);
+ = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
llvm::Value *extender
= llvm::InlineAsm::get(extenderType,
/* assembly */ "",
@@ -2537,4 +2749,226 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
Builder.CreateCall(extender, object)->setDoesNotThrow();
}
+/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy assignment function, produce the following helper:
+/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
+///
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+ if (hasTrivialSetExpr(PID))
+ return 0;
+ assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
+ if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__assign_helper_atomic_property_",
+ &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ Expr *Args[2] = { &DST, &SRC };
+ CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
+ CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
+ Args, 2, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation());
+
+ EmitStmt(&TheCall);
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
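
Concretely, for a hypothetical record type with a user-provided copy assignment operator, the function emitted above is equivalent to this sketch:

    struct MyCxxValue {
      MyCxxValue &operator=(const MyCxxValue &other); // non-trivial
    };

    // Matches the symbol and doc comment above.
    static void __assign_helper_atomic_property_(MyCxxValue *dst,
                                                 const MyCxxValue *src) {
      *dst = *src; // runs the user-defined copy assignment operator
    }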
+
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ QualType Ty = PD->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+
+ if (hasTrivialGetExpr(PID))
+ return 0;
+ assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
+ if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_atomic_property_", &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ CXXConstructExpr *CXXConstExpr =
+ cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
+
+ SmallVector<Expr*, 4> ConstructorArgs;
+ ConstructorArgs.push_back(&SRC);
+ CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
+ ++A;
+
+ for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
+ A != AEnd; ++A)
+ ConstructorArgs.push_back(*A);
+
+ CXXConstructExpr *TheCXXConstructExpr =
+ CXXConstructExpr::Create(C, Ty, SourceLocation(),
+ CXXConstExpr->getConstructor(),
+ CXXConstExpr->isElidable(),
+ &ConstructorArgs[0], ConstructorArgs.size(),
+ CXXConstExpr->hadMultipleCandidates(),
+ CXXConstExpr->isListInitialization(),
+ CXXConstExpr->requiresZeroInitialization(),
+ CXXConstExpr->getConstructionKind(),
+ SourceRange());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+
+ RValue DV = EmitAnyExpr(&DstExpr);
+ CharUnits Alignment
+ = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ EmitAggExpr(TheCXXConstructExpr,
+ AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
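
The getter-side helper differs in that it copy-constructs into the destination slot rather than assigning; a sketch under the same hypothetical type:

    #include <new>

    static void __copy_helper_atomic_property_(MyCxxValue *dst,
                                               const MyCxxValue *src) {
      ::new (static_cast<void *>(dst)) MyCxxValue(*src); // copy-construct
    }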
+
+llvm::Value *
+CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
+  // Get selectors for copy/autorelease.
+ IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ Selector CopySelector =
+ getContext().Selectors.getNullarySelector(CopyID);
+ IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ Selector AutoreleaseSelector =
+ getContext().Selectors.getNullarySelector(AutoreleaseID);
+
+  // Emit calls to copy/autorelease.
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Val = Block;
+ RValue Result;
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, CopySelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, AutoreleaseSelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ return Val;
+}
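
The two sends above implement the usual MRR idiom for handing back a block. Expressed as direct runtime calls (a sketch; real call sites must cast objc_msgSend to a matching function-pointer type, and the declarations come from the Darwin/GNUstep ObjC runtime headers):

    typedef id (*SendFn)(id, SEL);
    SendFn send = (SendFn)objc_msgSend;            // <objc/message.h>
    id copied = send(block, sel_registerName("copy"));
    id result = send(copied, sel_registerName("autorelease"));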
+
+
CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 688e9924be50..db0bd951c1e8 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -18,7 +18,6 @@
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "CGCleanup.h"
-
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -104,8 +103,6 @@ class LazyRuntimeFunction {
/// GNUstep).
class CGObjCGNU : public CGObjCRuntime {
protected:
- /// The module that is using this class
- CodeGenModule &CGM;
/// The LLVM module into which output is inserted
llvm::Module &TheModule;
/// struct objc_super. Used for sending messages to super. This structure
@@ -187,7 +184,7 @@ protected:
std::string name = prefix + Str;
llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
if (!ConstStr) {
- llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
}
@@ -288,6 +285,10 @@ private:
LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
WeakAssignFn, GlobalAssignFn;
+ typedef std::pair<std::string, std::string> ClassAliasPair;
+ /// All classes that have aliases set for them.
+ std::vector<ClassAliasPair> ClassAliases;
+
protected:
/// Function used for throwing Objective-C exceptions.
LazyRuntimeFunction ExceptionThrowFn;
@@ -336,10 +337,9 @@ private:
/// containing a size and an array of structures containing instance variable
/// metadata. This is used purely for introspection in the fragile ABI. In
/// the non-fragile ABI, it's used for instance variable fixup.
- llvm::Constant *GenerateIvarList(
- const SmallVectorImpl<llvm::Constant *> &IvarNames,
- const SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets);
/// Generates a method list structure. This is a structure containing a size
/// and an array of structures containing method metadata.
///
@@ -347,8 +347,8 @@ private:
/// pointer allowing them to be chained together in a linked list.
llvm::Constant *GenerateMethodList(const StringRef &ClassName,
const StringRef &CategoryName,
- const SmallVectorImpl<Selector> &MethodSels,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
/// Emits an empty protocol. This is used for @protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
@@ -361,8 +361,7 @@ private:
SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
/// Generates a list of referenced protocols. Classes, categories, and
/// protocols all use this structure.
- llvm::Constant *GenerateProtocolList(
- const SmallVectorImpl<std::string> &Protocols);
+ llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols);
/// To ensure that all protocols are seen by the runtime, we add a category on
/// a class defined in the runtime, declaring no methods, but adopting the
/// protocols. This is a horribly ugly hack, but it allows us to collect all
@@ -387,8 +386,8 @@ private:
/// Generates a method list. This is used by protocols to define the required
/// and optional methods.
llvm::Constant *GenerateProtocolMethodList(
- const SmallVectorImpl<llvm::Constant *> &MethodNames,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes);
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
@@ -427,7 +426,7 @@ protected:
/// significant bit being assumed to come first in the bitfield. Therefore,
/// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
/// while a bitfield with the 63rd bit set will be 1<<64.
- llvm::Constant *MakeBitField(llvm::SmallVectorImpl<bool> &bits);
+ llvm::Constant *MakeBitField(ArrayRef<bool> bits);
public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion);
@@ -466,13 +465,17 @@ public:
const ObjCContainerDecl *CD);
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD);
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
virtual llvm::Function *ModuleInitFunction();
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
virtual llvm::Constant *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
@@ -539,8 +542,8 @@ protected:
llvm::MDNode *node) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *args[] = {
- EnforceType(Builder, Receiver, IdTy),
- EnforceType(Builder, cmd, SelectorTy) };
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
imp->setMetadata(msgSendMDKind, node);
return imp.getInstruction();
@@ -599,9 +602,9 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn->setDoesNotCapture(1);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr, PtrToIdTy),
- EnforceType(Builder, cmd, SelectorTy),
- EnforceType(Builder, self, IdTy) };
+ EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy) };
llvm::CallSite slot = CGF.EmitCallOrInvoke(LookupFn, args);
slot.setOnlyReadsMemory();
slot->setMetadata(msgSendMDKind, node);
@@ -638,7 +641,7 @@ class CGObjCGNUstep : public CGObjCGNU {
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy, NULL);
// If we're in ObjC++ mode, then we want to make use of the C++
// exception-handling entry points (__cxa_begin_catch and friends):
- if (CGM.getLangOptions().CPlusPlus) {
+ if (CGM.getLangOpts().CPlusPlus) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
@@ -683,9 +686,9 @@ static std::string SymbolNameForMethod(const StringRef &ClassName,
CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion)
- : CGM(cgm), TheModule(CGM.getModule()), VMContext(cgm.getLLVMContext()),
- ClassPtrAlias(0), MetaClassPtrAlias(0), RuntimeVersion(runtimeABIVersion),
- ProtocolVersion(protocolClassVersion) {
+ : CGObjCRuntime(cgm), TheModule(CGM.getModule()),
+ VMContext(cgm.getLLVMContext()), ClassPtrAlias(0), MetaClassPtrAlias(0),
+ RuntimeVersion(runtimeABIVersion), ProtocolVersion(protocolClassVersion) {
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
@@ -770,7 +773,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
true));
- const LangOptions &Opts = CGM.getLangOptions();
+ const LangOptions &Opts = CGM.getLangOpts();
if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
RuntimeVersion = 10;
@@ -879,14 +882,14 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
- if (!CGM.getLangOptions().CPlusPlus) {
+ if (!CGM.getLangOpts().CPlusPlus) {
if (T->isObjCIdType()
|| T->isObjCQualifiedIdType()) {
// With the old ABI, there was only one kind of catchall, which broke
// foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
// a pointer indicating object catchalls, and NULL to indicate real
// catchalls
- if (CGM.getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
return MakeConstantString("@id");
} else {
return 0;
@@ -930,7 +933,8 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
// Return the existing typeinfo if it exists
llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
- if (typeinfo) return typeinfo;
+ if (typeinfo)
+ return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
// Otherwise create it.
@@ -970,12 +974,27 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
if (old != ObjCStrings.end())
return old->getValue();
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ std::string Sym = "_OBJC_CLASS_";
+ Sym += StringClass;
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalWeakLinkage, 0, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
std::vector<llvm::Constant*> Ivars;
- Ivars.push_back(NULLPtr);
+ Ivars.push_back(isa);
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
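
The constant string instance emitted here now carries a real isa; its layout, as a C sketch matching the three ivars pushed above (struct tag hypothetical):

    struct objc_constant_string {
      void *isa;        // weak reference to _OBJC_CLASS_<StringClass>
      const char *str;  // the literal's bytes
      int length;       // Str.size()
    };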
@@ -998,7 +1017,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method) {
CGBuilderTy &Builder = CGF.Builder;
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -1017,9 +1036,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
llvm::Value *ReceiverClass = 0;
if (isCategoryImpl) {
@@ -1072,12 +1089,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
- llvm::FunctionType *impType =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
// Get the IMP
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
- imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Value *impMD[] = {
llvm::MDString::get(VMContext, Sel.getAsString()),
@@ -1087,8 +1102,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
- 0, &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
}
@@ -1106,7 +1120,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
// Strip out message sends to retain / release in GC mode
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -1161,27 +1175,46 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
- // Get the IMP to call
- llvm::Value *imp = LookupIMP(CGF, Receiver, cmd, node);
-
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
- llvm::FunctionType *impType =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
- imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ // Get the IMP to call
+ llvm::Value *imp;
+
+ // If we have non-legacy dispatch specified, we try using the objc_msgSend()
+ // functions. These are not supported on all platforms (or all runtimes on a
+  // given platform), so we only take this path when Mixed or NonLegacy
+  // dispatch is requested; Legacy dispatch keeps the lookup-based path.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ imp = LookupIMP(CGF, Receiver, cmd, node);
+ break;
+ case CodeGenOptions::Mixed:
+ case CodeGenOptions::NonLegacy:
+ if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret");
+ } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret");
+ } else {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend");
+ }
+ }
+
+ // Reset the receiver in case the lookup modified it
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
- // For sender-aware dispatch, we pass the sender as the third argument to a
- // lookup function. When sending messages from C code, the sender is nil.
- // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
0, &call);
call->setMetadata(msgSendMDKind, node);
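
Distilled, the messenger choice above depends only on the return convention; a minimal restatement of the non-legacy branch:

    // Which objc_msgSend flavor the lookup picks (sketch of the code above).
    static const char *messengerName(bool usesFPRet, bool usesSRet) {
      if (usesFPRet) return "objc_msgSend_fpret"; // fp return: nil-safe x87 path
      if (usesSRet)  return "objc_msgSend_stret"; // hidden struct-return pointer
      return "objc_msgSend";
    }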
@@ -1225,11 +1258,12 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
-llvm::Constant *CGObjCGNU::GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
- const SmallVectorImpl<Selector> &MethodSels,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes,
- bool isClassMethodList) {
+llvm::Constant *CGObjCGNU::
+GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
// Get the method structure type.
@@ -1282,10 +1316,10 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const StringRef &ClassName,
}
/// Generates an IvarList. Used in construction of a objc_class.
-llvm::Constant *CGObjCGNU::GenerateIvarList(
- const SmallVectorImpl<llvm::Constant *> &IvarNames,
- const SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+llvm::Constant *CGObjCGNU::
+GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets) {
if (IvarNames.size() == 0)
return NULLPtr;
// Get the method structure type.
@@ -1345,7 +1379,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// anyway; the classes will still work with the GNU runtime, they will just
// be ignored.
llvm::StructType *ClassTy = llvm::StructType::get(
- PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // isa
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
LongTy, // version
@@ -1396,14 +1430,25 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(WeakIvarBitmap);
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
- // messages in the next ABI.
- return MakeGlobal(ClassTy, Elements, (isMeta ? "_OBJC_METACLASS_":
- "_OBJC_CLASS_") + std::string(Name), llvm::GlobalValue::ExternalLinkage);
+ // messages in the next ABI. We may already have some weak references to
+ // this, so check and fix them properly.
+ std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
+ std::string(Name));
+ llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
+ llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
+ if (ClassRef) {
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->getType()));
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
+ }
+ return Class;
}
-llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
- const SmallVectorImpl<llvm::Constant *> &MethodNames,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+llvm::Constant *CGObjCGNU::
+GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes) {
// Get the method structure type.
llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
@@ -1430,8 +1475,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
}
// Create the protocol list structure used in classes, categories and so on
-llvm::Constant *CGObjCGNU::GenerateProtocolList(
- const SmallVectorImpl<std::string> &Protocols) {
+llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(
@@ -1506,6 +1550,11 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
ASTContext &Context = CGM.getContext();
std::string ProtocolName = PD->getNameAsString();
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
SmallVector<std::string, 16> Protocols;
for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
E = PD->protocol_end(); PI != E; ++PI)
@@ -1723,7 +1772,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
/// significant bit being assumed to come first in the bitfield. Therefore, a
/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
/// bitfield with the 63rd bit set will be 1<<64.
-llvm::Constant *CGObjCGNU::MakeBitField(llvm::SmallVectorImpl<bool> &bits) {
+llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
int bitCount = bits.size();
int ptrBits =
(TheModule.getPointerSize() == llvm::Module::Pointer32) ? 32 : 64;
@@ -1881,6 +1930,15 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
".objc_property_list");
}
+void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
+ // Get the class declaration for which the alias is specified.
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ std::string AliasName = OAD->getNameAsString();
+ ClassAliases.push_back(ClassAliasPair(ClassName,AliasName));
+}
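
The declaration that reaches RegisterAlias comes from the @compatibility_alias construct; names below are illustrative:

    // Objective-C source that produces an ObjCCompatibleAliasDecl:
    //   @compatibility_alias AliasName ExistingClass;
    // RegisterAlias records the pair ("ExistingClass", "AliasName") for the
    // module load function to register with the runtime.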
+
void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ASTContext &Context = CGM.getContext();
@@ -1926,7 +1984,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -1941,7 +1999,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2013,9 +2071,9 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
}
// Collect the names of referenced protocols
SmallVector<std::string, 16> Protocols;
- const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
- E = Protos.end(); I != E; ++I)
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end(); I != E; ++I)
Protocols.push_back((*I)->getNameAsString());
@@ -2063,15 +2121,16 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the existing variable, if one exists.
llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
if (offset) {
- offset->setInitializer(offsetValue);
- // If this is the real definition, change its linkage type so that
- // different modules will use this one, rather than their private
- // copy.
- offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
} else {
- // Add a new alias if there isn't one already.
- offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
- false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ (void) offset; // Silence dead store warning.
}
++ivarIndex;
}
@@ -2135,7 +2194,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
ConstantStrings.size() + 1);
ConstantStrings.push_back(NULLPtr);
- StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
if (StringClass.empty()) StringClass = "NXConstantString";
@@ -2263,12 +2322,12 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(SymTab);
if (RuntimeVersion >= 10)
- switch (CGM.getLangOptions().getGC()) {
+ switch (CGM.getLangOpts().getGC()) {
case LangOptions::GCOnly:
Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
break;
case LangOptions::NonGC:
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
else
Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
@@ -2296,6 +2355,46 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::PointerType::getUnqual(ModuleTy), true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
+
+ if (!ClassAliases.empty()) {
+ llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
+ llvm::FunctionType *RegisterAliasTy =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ ArgTypes, false);
+ llvm::Function *RegisterAlias = llvm::Function::Create(
+ RegisterAliasTy,
+ llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np",
+ &TheModule);
+ llvm::BasicBlock *AliasBB =
+ llvm::BasicBlock::Create(VMContext, "alias", LoadFunction);
+ llvm::BasicBlock *NoAliasBB =
+ llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction);
+
+ // Branch based on whether the runtime provided class_registerAlias_np()
+ llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias,
+ llvm::Constant::getNullValue(RegisterAlias->getType()));
+ Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
+
+    // The true branch (has alias registration function):
+ Builder.SetInsertPoint(AliasBB);
+ // Emit alias registration calls:
+ for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
+ iter != ClassAliases.end(); ++iter) {
+ llvm::Constant *TheClass =
+ TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
+ true);
+ if (0 != TheClass) {
+ TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
+ Builder.CreateCall2(RegisterAlias, TheClass,
+ MakeConstantString(iter->second));
+ }
+ }
+ // Jump to end:
+ Builder.CreateBr(NoAliasBB);
+
+ // Missing alias registration function, just return from the function:
+ Builder.SetInsertPoint(NoAliasBB);
+ }
Builder.CreateRetVoid();
return LoadFunction;
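
Per the FunctionType built in this hunk, the weakly-imported hook tested above has the following shape; the declaration is an assumption about newer GNUstep runtimes:

    void class_registerAlias_np(void *cls, const char *alias);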
@@ -2312,7 +2411,7 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
- Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
@@ -2332,12 +2431,20 @@ llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
return SetPropertyFn;
}
+llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return 0;
+}
+
llvm::Constant *CGObjCGNU::GetGetStructFunction() {
return GetStructPropertyFn;
}
llvm::Constant *CGObjCGNU::GetSetStructFunction() {
return SetStructPropertyFn;
}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectFunction() {
+ return 0;
+}
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
return EnumerationMutationFn;
@@ -2487,7 +2594,7 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// to replace it with the real version for a library. In non-PIC code you
// must compile with the fragile ABI if you want to use ivars from a
// GCC-compiled class.
- if (CGM.getLangOptions().PICLevel) {
+ if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) {
llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
Int32Ty, false,
llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
@@ -2533,7 +2640,7 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
- if (CGM.getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
@@ -2547,7 +2654,10 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
Offset = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::LinkOnceAnyLinkage,
llvm::Constant::getNullValue(IntTy), name);
- return CGF.Builder.CreateLoad(Offset);
+ Offset = CGF.Builder.CreateLoad(Offset);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
@@ -2555,7 +2665,7 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGObjCRuntime *
clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
- if (CGM.getLangOptions().ObjCNonFragileABI)
+ if (CGM.getLangOpts().ObjCNonFragileABI)
return new CGObjCGNUstep(CGM);
return new CGObjCGCC(CGM);
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 308e0c7d3786..e5246f101f95 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -42,11 +42,8 @@
using namespace clang;
using namespace CodeGen;
-
namespace {
-typedef std::vector<llvm::Constant*> ConstantVector;
-
// FIXME: We should find a nicer way to make the labels for metadata, string
// concatenation is lame.
@@ -92,13 +89,28 @@ private:
/// would be unbalanced.
llvm::Constant *getMessageSendFpretFn() const {
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getDoubleTy(VMContext),
- params, true),
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
+ params, true),
"objc_msgSend_fpret");
}
+ /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned in two values on the
+ /// x87 floating point stack; without a special entrypoint, the nil case
+ /// would be unbalanced. Only used on 64-bit X86.
+ llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
+ llvm::Type *resultType =
+ llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
+ params, true),
+ "objc_msgSend_fp2ret");
+ }
+
/// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
///
/// The messenger used for super calls, which have different dispatch
@@ -159,7 +171,7 @@ protected:
public:
llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- llvm::Type *Int8PtrTy;
+ llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
llvm::Type *ObjectPtrTy;
@@ -169,10 +181,26 @@ public:
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
llvm::Type *SelectorPtrTy;
+
+private:
/// ProtocolPtrTy - LLVM type for external protocol handles
/// (typeof(Protocol))
llvm::Type *ExternalProtocolPtrTy;
-
+
+public:
+ llvm::Type *getExternalProtocolPtrTy() {
+ if (!ExternalProtocolPtrTy) {
+ // FIXME: It would be nice to unify this with the opaque type, so that the
+ // IR comes out a bit cleaner.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+ }
+
+ return ExternalProtocolPtrTy;
+ }
+
// SuperCTy - clang type for struct objc_super.
QualType SuperCTy;
// SuperPtrCTy - clang type for struct objc_super *.
@@ -213,9 +241,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -233,12 +261,47 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
+ llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty_atomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_atomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ const char *name;
+ if (atomic && copy)
+ name = "objc_setProperty_atomic_copy";
+ else if (atomic && !copy)
+ name = "objc_setProperty_atomic";
+ else if (!atomic && copy)
+ name = "objc_setProperty_nonatomic_copy";
+ else
+ name = "objc_setProperty_nonatomic";
+
+ return CGM.CreateRuntimeFunction(FTy, name);
+ }
llvm::Constant *getCopyStructFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -251,12 +314,31 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
+  /// This routine declares and returns the address of:
+ /// void objc_copyCppObjectAtomic(
+ /// void *dest, const void *src,
+ /// void (*copyHelper) (void *dest, const void *source));
+ llvm::Constant *getCppAtomicObjectFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
+ SmallVector<CanQualType,3> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
+ }
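
Tying the pieces together: the helpers emitted by GenerateObjCAtomic{Setter,Getter}CopyHelperFunction are what get passed as the third argument here, per the doc comment above:

    void objc_copyCppObjectAtomic(void *dest, const void *src,
                                  void (*copyHelper)(void *dest,
                                                     const void *src));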
+
llvm::Constant *getEnumerationMutationFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -264,9 +346,9 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
@@ -391,6 +473,14 @@ public:
return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
}
+ llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
+ }
+
+ llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
+ }
+
ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCCommonTypesHelper(){}
};
@@ -492,7 +582,8 @@ public:
llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
- "_setjmp");
+ "_setjmp",
+ llvm::Attribute::ReturnsTwice);
}
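Attaching ReturnsTwice here matters for correctness, not just tuning: the optimizer must not cache values in registers across a call that can return a second time via longjmp. The classic shape of the hazard:

    #include <setjmp.h>

    static jmp_buf buf;

    int returnsTwice(void) {
      volatile int phase = 0;  // volatile keeps it defined across longjmp
      if (setjmp(buf) == 0) {  // first return: setjmp yields 0
        phase = 1;
        longjmp(buf, 1);       // control re-emerges from setjmp with 1
      }
      return phase;            // 1, not a stale register copy
    }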
public:
@@ -663,7 +754,6 @@ public:
};
protected:
- CodeGen::CodeGenModule &CGM;
llvm::LLVMContext &VMContext;
// FIXME! We may not need this after all.
unsigned ObjCABI;
@@ -718,16 +808,16 @@ protected:
llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
/// DefinedClasses - List of defined classes.
- std::vector<llvm::GlobalValue*> DefinedClasses;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
/// DefinedNonLazyClasses - List of defined "non-lazy" classes.
- std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
/// DefinedCategories - List of defined categories.
- std::vector<llvm::GlobalValue*> DefinedCategories;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
- std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
/// GetNameForMethod - Return a name for the given method.
/// \param[out] NameOut - The return value.
@@ -741,10 +831,11 @@ protected:
llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
/// GetMethodVarType - Return a unique constant for the given
- /// selector's name. The return value has type char *.
+ /// method's type encoding string. The return value has type char *.
// FIXME: This is a horrible name.
- llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended = false);
llvm::Constant *GetMethodVarType(const FieldDecl *D);
/// GetPropertyName - Return a unique constant for the given
@@ -775,7 +866,7 @@ protected:
void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const SmallVectorImpl<const FieldDecl*> &RecFields,
+ ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion);
@@ -791,12 +882,19 @@ protected:
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes);
+ /// EmitProtocolMethodTypes - Generate the array of extended method type
+ /// strings. The return value has type Int8PtrPtrTy.
+ llvm::Constant *EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
/// PushProtocolProperties - Push protocol's property on the input stack.
- void PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- std::vector<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *PROTO,
- const ObjCCommonTypesHelper &ObjCTypes);
+ void PushProtocolProperties(
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
/// GetProtocolRef - Return a reference to the internal protocol
/// description, creating an empty one if it has not been
@@ -840,10 +938,10 @@ protected:
public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
- CGM(cgm), VMContext(cgm.getLLVMContext()) { }
+ CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
-
+
virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=0);
@@ -915,7 +1013,7 @@ private:
/// given implementation. The return value has type ClassPtrTy.
llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
@@ -925,7 +1023,7 @@ private:
/// implementation. The return value has type MethodListPtrTy.
llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// EmitMethodDescList - Emit a method description list for a list of
/// method declarations.
@@ -940,7 +1038,7 @@ private:
/// The return value has type MethodDescriptionListPtrTy.
llvm::Constant *EmitMethodDescList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
@@ -959,8 +1057,9 @@ private:
/// ProtocolExtensionPtrTy.
llvm::Constant *
EmitProtocolExtension(const ObjCProtocolDecl *PD,
- const ConstantVector &OptInstanceMethods,
- const ConstantVector &OptClassMethods);
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt);
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
@@ -1016,13 +1115,18 @@ public:
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
virtual llvm::Constant *GetGetStructFunction();
virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -1096,7 +1200,7 @@ private:
/// AddModuleClassList - Add the given list of class pointers to the
/// module with the provided symbol and section names.
- void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
const char *SymbolName,
const char *SectionName);
@@ -1118,7 +1222,7 @@ private:
/// implementation. The return value has type MethodListnfABITy.
llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// EmitIvarList - Emit the ivar list for the given
/// implementation. If ForClass is true the list of class ivars
/// (i.e. metaclass ivars) is emitted, otherwise the list of
@@ -1267,6 +1371,9 @@ public:
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
@@ -1279,12 +1386,20 @@ public:
return ObjCTypes.getSetPropertyFn();
}
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+ }
+
virtual llvm::Constant *GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
virtual llvm::Constant *GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
+ virtual llvm::Constant *GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
virtual llvm::Constant *EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
@@ -1325,13 +1440,13 @@ public:
/// value.
struct NullReturnState {
llvm::BasicBlock *NullBB;
-
- NullReturnState() : NullBB(0) {}
+ llvm::BasicBlock *callBB;
+ NullReturnState() : NullBB(0), callBB(0) {}
void init(CodeGenFunction &CGF, llvm::Value *receiver) {
// Make blocks for the null-init and call edges.
NullBB = CGF.createBasicBlock("msgSend.nullinit");
- llvm::BasicBlock *callBB = CGF.createBasicBlock("msgSend.call");
+ callBB = CGF.createBasicBlock("msgSend.call");
// Check for a null receiver and, if there is one, jump to the
// null-init test.
@@ -1342,8 +1457,16 @@ struct NullReturnState {
CGF.EmitBlock(callBB);
}
- RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType) {
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
if (!NullBB) return result;
+
+ llvm::Value *NullInitPtr = 0;
+ if (result.isScalar() && !resultType->isVoidType()) {
+ NullInitPtr = CGF.CreateTempAlloca(result.getScalarVal()->getType());
+ CGF.Builder.CreateStore(result.getScalarVal(), NullInitPtr);
+ }
// Finish the call path.
llvm::BasicBlock *contBB = CGF.createBasicBlock("msgSend.cont");
@@ -1351,13 +1474,54 @@ struct NullReturnState {
// Emit the null-init block and perform the null-initialization there.
CGF.EmitBlock(NullBB);
- assert(result.isAggregate() && "null init of non-aggregate result?");
- CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+
+ // Release consumed arguments along the null-receiver path.
+ if (Method) {
+ CallArgList::const_iterator I = CallArgs.begin();
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i, ++I) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->RV;
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), true);
+ }
+ }
+ }
+
+ if (result.isScalar()) {
+ if (NullInitPtr)
+ CGF.EmitNullInitialization(NullInitPtr, resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return NullInitPtr ? RValue::get(CGF.Builder.CreateLoad(NullInitPtr))
+ : result;
+ }
+
+ if (!resultType->isAnyComplexType()) {
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return result;
+ }
- // Jump to the continuation block.
+ // _Complex type
+ // FIXME: It would now be easy to handle any other scalar type whose
+ // result is returned in memory due to ABI limitations.
CGF.EmitBlock(contBB);
-
- return result;
+ CodeGenFunction::ComplexPairTy CallCV = result.getComplexVal();
+ llvm::Type *MemberType = CallCV.first->getType();
+ llvm::Constant *ZeroCV = llvm::Constant::getNullValue(MemberType);
+ // Create phi instruction for scalar complex value.
+ llvm::PHINode *PHIReal = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIReal->addIncoming(ZeroCV, NullBB);
+ PHIReal->addIncoming(CallCV.first, callBB);
+ llvm::PHINode *PHIImag = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIImag->addIncoming(ZeroCV, NullBB);
+ PHIImag->addIncoming(CallCV.second, callBB);
+ return RValue::getComplex(PHIReal, PHIImag);
}
};
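Condensed from the two call sites in this patch (EmitMessageSend below and EmitVTableMessageSend), the intended driving pattern is: init() only when the nil edge needs work, complete() always, since it is a no-op when NullBB was never created. Not compilable in isolation; CheckedMethod stands in for the requiresnullCheck ? Method : 0 expression used later:

    NullReturnState nullReturn;
    if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))      // indirect results must
      nullReturn.init(CGF, receiver);              // be zero-filled on nil
    RValue rv = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
    // complete() merges the nil edge back in: zero-init for aggregates, a
    // zeroed temporary for scalars, a pair of phis for _Complex values,
    // and releases of ns_consumed arguments when a method is supplied.
    return nullReturn.complete(CGF, rv, ResultType, CallArgs, CheckedMethod);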
@@ -1428,7 +1592,6 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
- return 0;
}
/// Generate a constant CFString object.
@@ -1452,11 +1615,15 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
llvm::Constant *CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
- return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
+ return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
CGM.GetAddrOfConstantString(SL));
}
+enum {
+ kCFTaggedObjectID_Integer = (1 << 1) + 1
+};
+
/// Generates a message send where the super is the receiver. This is
/// a message send to self with special delivery semantics indicating
/// which class's method should be called.
@@ -1553,11 +1720,8 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
- llvm::FunctionType *FTy =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+ // If we're calling a method, use the formal signature.
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
if (Method)
assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
@@ -1567,20 +1731,38 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
NullReturnState nullReturn;
llvm::Constant *Fn = NULL;
- if (CGM.ReturnTypeUsesSRet(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
if (!IsSuper) nullReturn.init(CGF, Arg0);
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
} else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
: ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
+ : ObjCTypes.getSendFp2retFn(IsSuper);
} else {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
- Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
- RValue rvalue = CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
- return nullReturn.complete(CGF, rvalue, ResultType);
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && Method)
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, Arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
+ return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
+ requiresnullCheck ? Method : 0);
}
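The messenger chosen above depends only on how the result comes back; note the asymmetry that super sends have no fpret/fp2ret variants and fall back to the plain super entry points. An indicative mapping for i386 Darwin:

    // Indicative only; exact choices are target- and ABI-dependent.
    //   large struct result      -> objc_msgSend_stret   (sret pointer)
    //   float/double/long double -> objc_msgSend_fpret   (x87 return)
    //   _Complex long double     -> objc_msgSend_fp2ret  (two x87 values)
    //   everything else          -> objc_msgSend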
static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
@@ -1590,6 +1772,10 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
return Qualifiers::Weak;
+ // Check for __unsafe_unretained.
+ if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
+ return Qualifiers::GCNone;
+
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
return Qualifiers::Strong;
@@ -1601,11 +1787,10 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
- llvm::Constant *nullPtr =
- llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
- !CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
@@ -1673,10 +1858,10 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
- if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n block variable layout for block: ");
const unsigned char *s = (unsigned char*)BitMap.c_str();
- for (unsigned i = 0; i < BitMap.size(); i++)
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
else
@@ -1694,7 +1879,7 @@ llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
- ObjCTypes.ExternalProtocolPtrTy);
+ ObjCTypes.getExternalProtocolPtrTy());
}
void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1729,12 +1914,16 @@ struct _objc__method_prototype_list *class_methods
See EmitProtocolExtension().
*/
llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
- llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
// Early exit if a defining object has already been generated.
if (Entry && Entry->hasInitializer())
return Entry;
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
// FIXME: I don't understand why gcc generates this, or where it is
// resolved. Investigate. It's also wasteful to look this up over and over.
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
@@ -1742,6 +1931,7 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
// Construct method lists.
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
for (ObjCProtocolDecl::instmeth_iterator
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
@@ -1751,8 +1941,10 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
@@ -1765,26 +1957,30 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
- std::vector<llvm::Constant*> Values(5);
- Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
- Values[1] = GetClassName(PD->getIdentifier());
- Values[2] =
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[] = {
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getIdentifier()),
EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
PD->protocol_begin(),
- PD->protocol_end());
- Values[3] =
+ PD->protocol_end()),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
"__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[4] =
+ InstanceMethods),
EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
"__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+ ClassMethods)
+ };
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values);
@@ -1801,6 +1997,8 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
+
+ Protocols[PD->getIdentifier()] = Entry;
}
CGM.AddUsedGlobal(Entry);
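Taking a copy of the map entry and storing it back, rather than holding llvm::GlobalVariable *&Entry across the body, avoids a likely reference-invalidation bug: the emission helpers called in between can register further protocols, and a DenseMap rehash would leave the captured reference dangling. The hazard, sketched:

    // What the copy/store-back pattern avoids (sketch):
    //   llvm::GlobalVariable *&Slot = Protocols[PD->getIdentifier()];
    //   EmitProtocolList(...);   // may insert into Protocols and rehash,
    //   Slot = Entry;            // leaving Slot pointing into freed memory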
@@ -1833,31 +2031,34 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
struct objc_method_description_list *optional_instance_methods;
struct objc_method_description_list *optional_class_methods;
struct objc_property_list *instance_properties;
+ const char ** extendedMethodTypes;
};
*/
llvm::Constant *
CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
- const ConstantVector &OptInstanceMethods,
- const ConstantVector &OptClassMethods) {
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt) {
uint64_t Size =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
- std::vector<llvm::Constant*> Values(4);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[1] =
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ PD->getName(),
"__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods);
- Values[2] =
+ OptInstanceMethods),
EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
"__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods);
- Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(),
- 0, PD, ObjCTypes);
+ OptClassMethods),
+ EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), 0, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)
+ };
// Return null if no extension bits are used.
if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
- Values[3]->isNullValue())
+ Values[3]->isNullValue() && Values[4]->isNullValue())
return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
llvm::Constant *Init =
@@ -1871,16 +2072,16 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
/*
struct objc_protocol_list {
- struct objc_protocol_list *next;
- long count;
- Protocol *list[];
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
};
*/
llvm::Constant *
CGObjCMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- std::vector<llvm::Constant*> ProtocolRefs;
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
for (; begin != end; ++begin)
ProtocolRefs.push_back(GetProtocolRef(*begin));
@@ -1909,12 +2110,12 @@ CGObjCMac::EmitProtocolList(Twine Name,
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
-void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- std::vector<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *PROTO,
- const ObjCCommonTypesHelper &ObjCTypes) {
- std::vector<llvm::Constant*> Prop(2);
+void CGObjCCommonMac::
+PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
E = PROTO->protocol_end(); P != E; ++P)
PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
@@ -1923,36 +2124,40 @@ void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierI
const ObjCPropertyDecl *PD = *I;
if (!PropertySet.insert(PD->getIdentifier()))
continue;
- Prop[0] = GetPropertyName(PD->getIdentifier());
- Prop[1] = GetPropertyTypeString(PD, Container);
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
}
}
/*
struct _objc_property {
- const char * const name;
- const char * const attributes;
+ const char * const name;
+ const char * const attributes;
};
struct _objc_property_list {
- uint32_t entsize; // sizeof (struct _objc_property)
- uint32_t prop_count;
- struct _objc_property[prop_count];
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
};
*/
llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
const Decl *Container,
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes) {
- std::vector<llvm::Constant*> Properties, Prop(2);
+ llvm::SmallVector<llvm::Constant*, 16> Properties;
llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
E = OCD->prop_end(); I != E; ++I) {
const ObjCPropertyDecl *PD = *I;
PropertySet.insert(PD->getIdentifier());
- Prop[0] = GetPropertyName(PD->getIdentifier());
- Prop[1] = GetPropertyTypeString(PD, Container);
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
Prop));
}
@@ -1993,6 +2198,26 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
+llvm::Constant *
+CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ // Return null for empty list.
+ if (MethodTypes.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy);
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ MethodTypes.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" : 0,
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+}
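Extended method type encodings differ from the classic ones by embedding class and protocol names, which is what the new extendedMethodTypes field carries. Indicative values only; the offsets vary with the target:

    // For a protocol method:  - (NSString *)name;
    //   GetMethodVarType(MD)        -> "@8@0:4"              (classic)
    //   GetMethodVarType(MD, true)  -> "@\"NSString\"8@0:4"  (extended)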
+
/*
struct objc_method_description_list {
int count;
@@ -2001,11 +2226,11 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
*/
llvm::Constant *
CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- std::vector<llvm::Constant*> Desc(2);
- Desc[0] =
+ llvm::Constant *Desc[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Desc[1] = GetMethodVarType(MD);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD)
+ };
if (!Desc[1])
return 0;
@@ -2013,9 +2238,9 @@ CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
Desc);
}
-llvm::Constant *CGObjCMac::EmitMethodDescList(Twine Name,
- const char *Section,
- const ConstantVector &Methods) {
+llvm::Constant *
+CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
@@ -2054,11 +2279,11 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
- llvm::SmallString<256> ExtName;
+ SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
<< OCD->getName();
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCCategoryImplDecl::instmeth_iterator
i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
// Instance methods should always be defined.
@@ -2164,7 +2389,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCImplementationDecl::instmeth_iterator
i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
// Instance methods should always be defined.
@@ -2244,7 +2469,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- const ConstantVector &Methods) {
+ ArrayRef<llvm::Constant*> Methods) {
unsigned Flags = eClassFlags_Meta;
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
@@ -2384,19 +2609,19 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
/*
struct objc_ivar {
- char *ivar_name;
- char *ivar_type;
- int ivar_offset;
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
};
struct objc_ivar_list {
- int ivar_count;
- struct objc_ivar list[count];
+ int ivar_count;
+ struct objc_ivar list[count];
};
*/
llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
bool ForClass) {
- std::vector<llvm::Constant*> Ivars, Ivar(3);
+ std::vector<llvm::Constant*> Ivars;
// When emitting the root class GCC emits ivar entries for the
// actual class structure. It is not clear if we need to follow this
@@ -2413,10 +2638,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- Ivar[0] = GetMethodVarName(IVD->getIdentifier());
- Ivar[1] = GetMethodVarType(IVD);
- Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
- ComputeIvarBaseOffset(CGM, OID, IVD));
+ llvm::Constant *Ivar[] = {
+ GetMethodVarName(IVD->getIdentifier()),
+ GetMethodVarType(IVD),
+ llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD))
+ };
Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
}
@@ -2465,18 +2692,18 @@ llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
if (!Fn)
return 0;
- std::vector<llvm::Constant*> Method(3);
- Method[0] =
+ llvm::Constant *Method[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Method[1] = GetMethodVarType(MD);
- Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
}
llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods) {
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
@@ -2495,12 +2722,12 @@ llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
GetNameForMethod(OMD, CD, Name);
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
- Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
llvm::Function *Method =
llvm::Function::Create(MethodTy,
llvm::GlobalValue::InternalLinkage,
@@ -2544,6 +2771,11 @@ llvm::Constant *CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
+llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+}
+
llvm::Constant *CGObjCMac::GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -2551,6 +2783,10 @@ llvm::Constant *CGObjCMac::GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
+llvm::Constant *CGObjCMac::GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+
llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -2959,6 +3195,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::CallInst *SetJmpResult =
CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
// If setjmp returned 0, enter the protected block; otherwise,
// branch to the handler.
@@ -3024,6 +3261,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
"setjmp.result");
SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
llvm::Value *Threw =
CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
@@ -3379,41 +3617,48 @@ enum ImageInfoFlags {
void CGObjCCommonMac::EmitImageInfo() {
unsigned version = 0; // Version is unused?
- unsigned flags = 0;
-
- // FIXME: Fix and continue?
- if (CGM.getLangOptions().getGC() != LangOptions::NonGC)
- flags |= eImageInfo_GarbageCollected;
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
- flags |= eImageInfo_GCOnly;
-
- // We never allow @synthesize of a superclass property.
- flags |= eImageInfo_CorrectedSynthesize;
-
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
- // Emitted as int[2];
- llvm::Constant *values[2] = {
- llvm::ConstantInt::get(Int32Ty, version),
- llvm::ConstantInt::get(Int32Ty, flags)
- };
- llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
-
- const char *Section;
- if (ObjCABI == 1)
- Section = "__OBJC, __image_info,regular";
- else
- Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
- llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
- llvm::ConstantArray::get(AT, values),
- Section,
- 0,
- true);
- GV->setConstant(true);
+ const char *Section = (ObjCABI == 1) ?
+ "__OBJC, __image_info,regular" :
+ "__DATA, __objc_imageinfo, regular, no_dead_strip";
+
+ // Generate module-level named metadata to convey this information to the
+ // linker and code-gen.
+ llvm::Module &Mod = CGM.getModule();
+
+ // Add the ObjC ABI version to the module flags.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version",
+ version);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
+ llvm::MDString::get(VMContext, Section));
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // Non-GC overrides those files that specify GC.
+ Mod.addModuleFlag(llvm::Module::Override,
+ "Objective-C Garbage Collection", (uint32_t)0);
+ } else {
+ // Add the ObjC garbage collection value.
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ eImageInfo_GarbageCollected);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // Add the ObjC GC Only value.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only",
+ eImageInfo_GCOnly);
+
+ // Require that GC be specified and set to eImageInfo_GarbageCollected.
+ llvm::Value *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ eImageInfo_GarbageCollected)
+ };
+ Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ }
}
-
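The image-info words now travel as module flags and the backend re-synthesizes the section, which lets per-file GC settings be merged or rejected according to the behavior codes (Error, Warning, Require, Override). Rough shape of the metadata this emits for a non-GC, ABI-2 translation unit, shown as comments and illustrative only:

    // !llvm.module.flags = !{!0, !1, !2, !3}
    // !0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
    // !1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
    // !2 = metadata !{i32 1, metadata !"Objective-C Image Info Section",
    //                 metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
    // !3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}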
// struct objc_module {
// unsigned long version;
// unsigned long size;
@@ -3427,12 +3672,13 @@ static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
- std::vector<llvm::Constant*> Values(4);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
- // This used to be the filename, now it is unused. <rdr://4327263>
- Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
- Values[3] = EmitModuleSymbols();
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
+ llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
+ // This used to be the filename; now it is unused. <rdar://4327263>
+ GetClassName(&CGM.getContext().Idents.get("")),
+ EmitModuleSymbols()
+ };
CreateMetadataVar("\01L_OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
"__OBJC,__module_info,regular,no_dead_strip",
@@ -3455,7 +3701,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
// The runtime expects exactly the list of defined classes followed
// by the list of defined categories, in a single array.
- std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
for (unsigned i=0; i<NumClasses; i++)
Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
ObjCTypes.Int8PtrTy);
@@ -3466,7 +3712,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
Values[4] =
llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- NumClasses + NumCategories),
+ Symbols.size()),
Symbols);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
@@ -3531,8 +3777,8 @@ llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantArray::get(VMContext,
- Ident->getNameStart()),
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
((ObjCABI == 2) ?
"__TEXT,__objc_classname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3575,7 +3821,7 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const SmallVectorImpl<const FieldDecl*> &RecFields,
+ ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion) {
bool IsUnion = (RD && RD->isUnion());
@@ -3593,7 +3839,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
return;
unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
- if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
+ if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
const FieldDecl *FirstField = RecFields[0];
FirstFieldDelta =
ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
@@ -3740,9 +3986,9 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
/// the given argument BitMap string container. Routine reads
/// two containers, IvarsInfo and SkipIvars which are assumed to be
/// filled already by the caller.
-llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
unsigned int WordsToScan, WordsToSkip;
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
// Build the string of skip/scan nibbles
SmallVector<SKIP_SCAN, 32> SkipScanIvars;
@@ -3857,7 +4103,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
llvm::GlobalVariable * Entry =
CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+ llvm::ConstantDataArray::getString(VMContext, BitMap, /*AddNull=*/false),
((ObjCABI == 2) ?
"__TEXT,__objc_classname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3886,14 +4132,14 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool ForStrongLayout) {
bool hasUnion = false;
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
- !CGM.getLangOptions().ObjCAutoRefCount)
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
const ObjCInterfaceDecl *OI = OMD->getClassInterface();
SmallVector<const FieldDecl*, 32> RecFields;
- if (CGM.getLangOptions().ObjCAutoRefCount) {
+ if (CGM.getLangOpts().ObjCAutoRefCount) {
for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
RecFields.push_back(cast<FieldDecl>(IVD));
@@ -3925,12 +4171,12 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
- if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
OMD->getClassInterface()->getName().data());
const unsigned char *s = (unsigned char*)BitMap.c_str();
- for (unsigned i = 0; i < BitMap.size(); i++)
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
else
@@ -3943,10 +4189,10 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
- // FIXME: Avoid std::string copying.
+ // FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
- llvm::ConstantArray::get(VMContext, Sel.getAsString()),
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
((ObjCABI == 2) ?
"__TEXT,__objc_methname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3968,7 +4214,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantArray::get(VMContext, TypeStr),
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ?
"__TEXT,__objc_methtype,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3977,18 +4223,17 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
return getConstantGEP(VMContext, Entry, 0, 0);
}
-llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended) {
std::string TypeStr;
- if (CGM.getContext().getObjCEncodingForMethodDecl(
- const_cast<ObjCMethodDecl*>(D),
- TypeStr))
+ if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
return 0;
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantArray::get(VMContext, TypeStr),
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ?
"__TEXT,__objc_methtype,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -4003,8 +4248,8 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
- llvm::ConstantArray::get(VMContext,
- Ident->getNameStart()),
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
"__TEXT,__cstring,cstring_literals",
1, true);
@@ -4030,7 +4275,7 @@ void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
<< '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- OS << '(' << CID << ')';
+ OS << '(' << *CID << ')';
OS << ' ' << D->getSelector().getAsString() << ']';
}
@@ -4044,7 +4289,7 @@ void CGObjCMac::FinishModule() {
if (I->second->hasInitializer())
continue;
- std::vector<llvm::Constant*> Values(5);
+ llvm::Constant *Values[5];
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
Values[1] = GetClassName(I->first);
Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
@@ -4062,7 +4307,7 @@ void CGObjCMac::FinishModule() {
//
// FIXME: It would be nice if we had an LLVM construct for this.
if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
- llvm::SmallString<256> Asm;
+ SmallString<256> Asm;
Asm += CGM.getModule().getModuleInlineAsm();
if (!Asm.empty() && Asm.back() != '\n')
Asm += '\n';
@@ -4077,7 +4322,7 @@ void CGObjCMac::FinishModule() {
OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
}
- for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) {
+ for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
<< "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
}
@@ -4096,7 +4341,8 @@ CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
/* *** */
ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
- : VMContext(cgm.getLLVMContext()), CGM(cgm) {
+ : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(0) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -4104,17 +4350,13 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
IntTy = Types.ConvertType(Ctx.IntTy);
LongTy = Types.ConvertType(Ctx.LongTy);
LongLongTy = Types.ConvertType(Ctx.LongLongTy);
- Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrPtrTy = CGM.Int8PtrPtrTy;
ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
- // FIXME: It would be nice to unify this with the opaque type, so that the IR
- // comes out a bit cleaner.
- llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
- ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
-
// I'm not sure I like this. The implicit coordination is a bit
// gross. We should solve this in a reasonable fashion because this
// is a pretty common task (match some runtime data structure with
@@ -4206,12 +4448,13 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_method_description_list *optional_instance_methods;
// struct _objc_method_description_list *optional_class_methods;
// struct _objc_property_list *instance_properties;
+ // const char ** extendedMethodTypes;
// }
ProtocolExtensionTy =
llvm::StructType::create("struct._objc_protocol_extension",
IntTy, MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy, PropertyListPtrTy,
- NULL);
+ Int8PtrPtrTy, NULL);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
@@ -4349,14 +4592,12 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
uint64_t SetJmpBufferSize = 18;
// Exceptions
- llvm::Type *StackPtrTy = llvm::ArrayType::get(
- llvm::Type::getInt8PtrTy(VMContext), 4);
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4);
ExceptionDataTy =
llvm::StructType::create("struct._objc_exception_data",
- llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
- StackPtrTy, NULL);
+ llvm::ArrayType::get(CGM.Int32Ty, SetJmpBufferSize),
+ StackPtrTy, NULL);
}
@@ -4384,6 +4625,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// const struct _prop_list_t * properties;
// const uint32_t size; // sizeof(struct _protocol_t)
// const uint32_t flags; // = 0
+ // const char ** extendedMethodTypes;
// }
// Holder for struct _protocol_list_t *
@@ -4395,7 +4637,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
llvm::PointerType::getUnqual(ProtocolListnfABITy),
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
- PropertyListPtrTy, IntTy, IntTy, NULL);
+ PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
+ NULL);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -4555,23 +4798,22 @@ llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
return NULL;
}
-void CGObjCNonFragileABIMac::AddModuleClassList(const
- std::vector<llvm::GlobalValue*>
- &Container,
- const char *SymbolName,
- const char *SectionName) {
+void CGObjCNonFragileABIMac::
+AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName) {
unsigned NumClasses = Container.size();
if (!NumClasses)
return;
- std::vector<llvm::Constant*> Symbols(NumClasses);
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
for (unsigned i=0; i<NumClasses; i++)
Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
ObjCTypes.Int8PtrTy);
- llvm::Constant* Init =
+ llvm::Constant *Init =
llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- NumClasses),
+ Symbols.size()),
Symbols);
llvm::GlobalVariable *GV =
@@ -4593,14 +4835,14 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
"\01L_OBJC_LABEL_CLASS_$",
"__DATA, __objc_classlist, regular, no_dead_strip");
- for (unsigned i = 0; i < DefinedClasses.size(); i++) {
+ for (unsigned i = 0, e = DefinedClasses.size(); i < e; i++) {
llvm::GlobalValue *IMPLGV = DefinedClasses[i];
if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
continue;
IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
}
- for (unsigned i = 0; i < DefinedMetaClasses.size(); i++) {
+ for (unsigned i = 0, e = DefinedMetaClasses.size(); i < e; i++) {
llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
continue;
@@ -4631,8 +4873,6 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// At various points we've experimented with using vtable-based
// dispatch for all methods.
switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
- default:
- llvm_unreachable("Invalid dispatch method!");
case CodeGenOptions::Legacy:
return false;
case CodeGenOptions::NonLegacy:
@@ -4653,7 +4893,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is disabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGC() != LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
VTableDispatchMethods.insert(GetNullarySelector("retain"));
VTableDispatchMethods.insert(GetNullarySelector("release"));
VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
@@ -4669,7 +4909,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is enabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGC() != LangOptions::NonGC) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
VTableDispatchMethods.insert(GetNullarySelector("hash"));
VTableDispatchMethods.insert(GetUnarySelector("addObject"));
@@ -4721,9 +4961,9 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getNameAsString();
- std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ llvm::Constant *Values[10]; // 11 for 64-bit targets!
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
flags |= CLS_COMPILED_BY_ARC;
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
@@ -4819,14 +5059,15 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
llvm::Constant *SuperClassGV,
llvm::Constant *ClassRoGV,
bool HiddenVisibility) {
- std::vector<llvm::Constant*> Values(5);
- Values[0] = IsAGV;
- Values[1] = SuperClassGV;
+ llvm::Constant *Values[] = {
+ IsAGV,
+ SuperClassGV,
+ ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
+ ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
+ ClassRoGV // &CLASS_RO_GV
+ };
if (!Values[1])
Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
- Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
- Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
- Values[4] = ClassRoGV; // &CLASS_RO_GV
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
Values);
llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
@@ -4987,7 +5228,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
//
llvm::Constant *Init =
llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
- ObjCTypes.ExternalProtocolPtrTy);
+ ObjCTypes.getExternalProtocolPtrTy());
std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getName();
@@ -5025,7 +5266,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
std::string ExtClassName(getClassSymbolPrefix() +
Interface->getNameAsString());
- std::vector<llvm::Constant*> Values(6);
+ llvm::Constant *Values[6];
Values[0] = GetClassName(OCD->getIdentifier());
// meta-class entry symbol
llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
@@ -5064,7 +5305,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
- llvm::SmallString<256> ExtName;
+ SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
<< OCD->getName();
Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
@@ -5110,12 +5351,12 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
if (!Fn)
return 0;
- std::vector<llvm::Constant*> Method(3);
- Method[0] =
+ llvm::Constant *Method[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Method[1] = GetMethodVarType(MD);
- Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
}
@@ -5126,9 +5367,10 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
/// struct _objc_method method_list[method_count];
/// }
///
-llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
- const char *Section,
- const ConstantVector &Methods) {
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
@@ -5215,7 +5457,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
const ObjCImplementationDecl *ID) {
- std::vector<llvm::Constant*> Ivars, Ivar(5);
+ std::vector<llvm::Constant*> Ivars;
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
@@ -5227,6 +5469,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
+ llvm::Constant *Ivar[5];
Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
ComputeIvarBaseOffset(CGM, ID, IVD));
Ivar[1] = GetMethodVarName(IVD->getIdentifier());
@@ -5304,21 +5547,27 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
/// const struct _prop_list_t * properties;
/// const uint32_t size; // sizeof(struct _protocol_t)
/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
/// }
/// @endcode
///
llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
const ObjCProtocolDecl *PD) {
- llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
// Early exit if a defining object has already been generated.
if (Entry && Entry->hasInitializer())
return Entry;
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
// Construct method lists.
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
for (ObjCProtocolDecl::instmeth_iterator
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
@@ -5328,8 +5577,10 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
@@ -5342,12 +5593,17 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
- std::vector<llvm::Constant*> Values(10);
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[11];
// isa is NULL
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
Values[1] = GetClassName(PD->getIdentifier());
@@ -5377,6 +5633,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ + PD->getName(),
+ MethodTypesExt, ObjCTypes);
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
Values);
@@ -5392,6 +5651,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Entry->setAlignment(
CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ Protocols[PD->getIdentifier()] = Entry;
}
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(Entry);
@@ -5422,14 +5683,14 @@ llvm::Constant *
CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- std::vector<llvm::Constant*> ProtocolRefs;
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
// Just return null for empty protocol lists
if (begin == end)
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
// FIXME: We shouldn't need to do this lookup here, should we?
- llvm::SmallString<256> TmpName;
+ SmallString<256> TmpName;
Name.toVector(TmpName);
llvm::GlobalVariable *GV =
CGM.getModule().getGlobalVariable(TmpName.str(), true);
@@ -5447,10 +5708,9 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
Values[0] =
llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
Values[1] =
- llvm::ConstantArray::get(
- llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
@@ -5473,7 +5733,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
llvm::Constant *
CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- std::vector<llvm::Constant*> Desc(3);
+ llvm::Constant *Desc[3];
Desc[0] =
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
ObjCTypes.SelectorPtrTy);
@@ -5499,8 +5759,13 @@ LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
+ if (llvm::LoadInst *LI = dyn_cast<llvm::LoadInst>(Offset))
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
- EmitIvarOffset(CGF, ID, Ivar));
+ Offset);
}
llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
@@ -5557,9 +5822,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
args.insert(args.end(), formalArgs.begin(), formalArgs.end());
- const CGFunctionInfo &fnInfo =
- CGM.getTypes().getFunctionInfo(resultType, args,
- FunctionType::ExtInfo());
+ MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
NullReturnState nullReturn;
@@ -5572,7 +5835,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// FIXME: don't use this for that.
llvm::Constant *fn = 0;
std::string messageRefName("\01l_");
- if (CGM.ReturnTypeUsesSRet(fnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
if (isSuper) {
fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
messageRefName += "objc_msgSendSuper2_stret_fixup";
@@ -5616,6 +5879,20 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
messageRef->setAlignment(16);
messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
}
+
+ bool requiresNullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && method)
+ for (ObjCMethodDecl::param_const_iterator i = method->param_begin(),
+ e = method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, arg0);
+ requiresNullCheck = true;
+ break;
+ }
+ }
+
llvm::Value *mref =
CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
@@ -5626,14 +5903,11 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0);
callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
- bool variadic = method ? method->isVariadic() : false;
- llvm::FunctionType *fnType =
- CGF.getTypes().GetFunctionType(fnInfo, variadic);
- callee = CGF.Builder.CreateBitCast(callee,
- llvm::PointerType::getUnqual(fnType));
+ callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
- RValue result = CGF.EmitCall(fnInfo, callee, returnSlot, args);
- return nullReturn.complete(CGF, result, resultType);
+ RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType, formalArgs,
+ requiresNullCheck ? method : 0);
}
/// Generate code for a message send expression in the nonfragile abi.
@@ -5839,7 +6113,12 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
if (lval)
return Entry;
- return Builder.CreateLoad(Entry);
+ llvm::LoadInst* LI = Builder.CreateLoad(Entry);
+
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return LI;
}
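This hunk and the ivar-offset hunk above attach the same empty !invariant.load metadata, telling the optimizer that the loaded selector (or offset) never changes once initialized, so redundant loads can be hoisted or folded. A minimal sketch of the pattern; SelectorGV is a hypothetical global, the other names mirror the surrounding code:

    // Mark the load invariant so later identical loads may be CSE'd.
    llvm::LoadInst *LI = Builder.CreateLoad(SelectorGV, "sel");
    LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
                    llvm::MDNode::get(VMContext, ArrayRef<llvm::Value*>()));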
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
@@ -6049,13 +6328,13 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
llvm::GlobalValue::ExternalLinkage,
0, VTableName);
- llvm::Value *VTableIdx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
- std::vector<llvm::Constant*> Values(3);
- Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx);
- Values[1] = GetClassName(ID->getIdentifier());
- Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Values[] = {
+ llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx),
+ GetClassName(ID->getIdentifier()),
+ GetClassGlobal(ClassName)
+ };
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
@@ -6069,7 +6348,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
ID->getIdentifier()->getName()));
}
- if (CGM.getLangOptions().getVisibilityMode() == HiddenVisibility)
+ if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
ObjCTypes.EHTypeTy));
@@ -6088,7 +6367,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CodeGen::CGObjCRuntime *
CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
- if (CGM.getLangOptions().ObjCNonFragileABI)
+ if (CGM.getLangOpts().ObjCNonFragileABI)
return new CGObjCNonFragileABIMac(CGM);
return new CGObjCMac(CGM);
}
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index ef426ce6ed9b..93700960df23 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -41,7 +41,7 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
// If we know have an implementation (and the ivar is in it) then
// look up in the implementation layout.
const ASTRecordLayout *RL;
- if (ID && ID->getClassInterface() == Container)
+ if (ID && declaresSameEntity(ID->getClassInterface(), Container))
RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
else
RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
@@ -85,7 +85,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *I8Ptr = CGF.Int8PtrTy;
QualType IvarTy = Ivar->getType();
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
@@ -93,7 +93,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
if (!Ivar->isBitField()) {
- LValue LV = CGF.MakeAddrLValue(V, IvarTy);
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
LV.getQuals().addCVRQualifiers(CVRQualifiers);
return LV;
}
@@ -229,7 +229,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cast<llvm::CallInst>(Exn)->setDoesNotThrow();
}
- CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
if (endCatchFn) {
// Add a cleanup to leave the catch.
@@ -246,7 +246,24 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
- CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+
+ llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
+
+ switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, CatchParamAddr);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
+ break;
+ }
}
CGF.ObjCEHValueStack.push_back(Exn);
@@ -293,7 +310,7 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
// ARC release and lock-release cleanups.
const Expr *lockExpr = S.getSynchExpr();
llvm::Value *lock;
- if (CGF.getLangOptions().ObjCAutoRefCount) {
+ if (CGF.getLangOpts().ObjCAutoRefCount) {
lock = CGF.EmitARCRetainScalarExpr(lockExpr);
lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
} else {
@@ -310,3 +327,48 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
}
+
+/// Compute the pointer-to-function type to which a message send
+/// should be cast in order to correctly call the given method
+/// with the given arguments.
+///
+/// \param method - may be null
+/// \param resultType - the result type to use if there's no method
+/// \param callArgs - the actual arguments, including implicit ones
+CGObjCRuntime::MessageSendInfo
+CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs) {
+ // If there's a method, use information from that.
+ if (method) {
+ const CGFunctionInfo &signature =
+ CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
+
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(signature)->getPointerTo();
+
+ // If that's not variadic, there's no need to recompute the ABI
+ // arrangement.
+ if (!signature.isVariadic())
+ return MessageSendInfo(signature, signatureType);
+
+ // Otherwise, there is.
+ FunctionType::ExtInfo einfo = signature.getExtInfo();
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
+
+ return MessageSendInfo(argsInfo, signatureType);
+ }
+
+ // There's no method; just use a default CC.
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ // Derive the signature to call from that.
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
+ return MessageSendInfo(argsInfo, signatureType);
+}
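A short usage sketch, mirroring how the EmitVTableMessageSend hunk earlier in this diff consumes the result (names taken from that hunk):

    MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
    callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
    RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);

Computing both pieces in one place keeps the ABI arrangement and the messenger's pointer type from drifting apart across call sites.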
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 4fa47a740aaf..ccf4d4dfca56 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -63,6 +63,9 @@ namespace CodeGen {
/// Implements runtime-specific code generation functions.
class CGObjCRuntime {
protected:
+ CodeGen::CodeGenModule &CGM;
+ CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}
+
// Utility functions for unified ivar access. These need to
// eventually be folded into other places (the structure layout
// code).
@@ -132,7 +135,7 @@ public:
/// Generate a constant string object.
virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
-
+
/// Generate a category. A category contains a list of methods (and
/// accompanying metadata) and a list of protocols.
virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
@@ -140,6 +143,9 @@ public:
/// Generate a class structure for this class.
virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+ /// Register a class alias.
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;
+
/// Generate an Objective-C message send operation.
///
/// \param Method - The method being called, this may be null if synthesizing
@@ -196,10 +202,17 @@ public:
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
+ /// Return the runtime function for optimized property setting.
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
+
// API for atomic copying of qualified aggregates in getter.
virtual llvm::Constant *GetGetStructFunction() = 0;
// API for atomic copying of qualified aggregates in setter.
virtual llvm::Constant *GetSetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates with non-trivial copy
+ // assignment (C++) in setter/getter.
+ virtual llvm::Constant *GetCppAtomicObjectFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
@@ -249,6 +262,19 @@ public:
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
+
+ struct MessageSendInfo {
+ const CGFunctionInfo &CallInfo;
+ llvm::PointerType *MessengerType;
+
+ MessageSendInfo(const CGFunctionInfo &callInfo,
+ llvm::PointerType *messengerType)
+ : CallInfo(callInfo), MessengerType(messengerType) {}
+ };
+
+ MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs);
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index fbdb2984830b..19973b46b7cd 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -26,8 +26,6 @@ class RTTIBuilder {
CodeGenModule &CGM; // Per-module state.
llvm::LLVMContext &VMContext;
- llvm::Type *Int8PtrTy;
-
/// Fields - The fields of the RTTI descriptor currently being built.
SmallVector<llvm::Constant *, 16> Fields;
@@ -65,8 +63,7 @@ class RTTIBuilder {
public:
RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
- VMContext(CGM.getModule().getContext()),
- Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
+ VMContext(CGM.getModule().getContext()) { }
// Pointer type info flags.
enum {
@@ -116,7 +113,7 @@ public:
llvm::GlobalVariable *
RTTIBuilder::GetAddrOfTypeName(QualType Ty,
llvm::GlobalVariable::LinkageTypes Linkage) {
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
@@ -125,7 +122,8 @@ RTTIBuilder::GetAddrOfTypeName(QualType Ty,
// We know that the mangled name of the type starts at index 4 of the
// mangled name of the typename, so we can just index into it in order to
// get the mangled name of the type.
- llvm::Constant *Init = llvm::ConstantArray::get(VMContext, Name.substr(4));
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ Name.substr(4));
llvm::GlobalVariable *GV =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
@@ -137,7 +135,7 @@ RTTIBuilder::GetAddrOfTypeName(QualType Ty,
llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
@@ -148,11 +146,12 @@ llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
if (!GV) {
// Create a new global variable.
- GV = new llvm::GlobalVariable(CGM.getModule(), Int8PtrTy, /*Constant=*/true,
+ GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, 0, Name);
}
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
@@ -195,10 +194,11 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::UInt128:
return true;
- case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
llvm_unreachable("asking for RRTI for a placeholder type!");
case BuiltinType::ObjCId:
@@ -206,9 +206,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::ObjCSel:
llvm_unreachable("FIXME: Objective-C types are unsupported!");
}
-
- // Silent gcc.
- return false;
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
}
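For reference, the include above expands PLACEHOLDER_TYPE once per placeholder type, so it covers at least the cases the old code listed by hand, plus any placeholder added to BuiltinTypes.def later:

    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::UnknownAny:
      llvm_unreachable("asking for RTTI for a placeholder type!");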
static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
@@ -250,7 +249,7 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
ASTContext &Context = CGM.getContext();
// If RTTI is disabled, don't consider key functions.
- if (!Context.getLangOptions().RTTI) return false;
+ if (!Context.getLangOpts().RTTI) return false;
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
@@ -327,7 +326,7 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
return llvm::GlobalValue::InternalLinkage;
case ExternalLinkage:
- if (!CGM.getLangOptions().RTTI) {
+ if (!CGM.getLangOpts().RTTI) {
// RTTI is not enabled, which means that this type info struct is going
// to be used for exception handling. Give it linkonce_odr linkage.
return llvm::GlobalValue::LinkOnceODRLinkage;
@@ -335,6 +334,8 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->hasAttr<WeakAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
if (RD->isDynamicClass())
return CGM.getVTableLinkage(RD);
}
@@ -342,7 +343,7 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
return llvm::GlobalValue::LinkOnceODRLinkage;
}
- return llvm::GlobalValue::LinkOnceODRLinkage;
+ llvm_unreachable("Invalid linkage!");
}
// CanUseSingleInheritance - Return whether the given record decl has a "single,
@@ -479,7 +480,7 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
}
llvm::Constant *VTable =
- CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -487,7 +488,7 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
// The vtable address point is 2.
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
- VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
}
@@ -531,7 +532,7 @@ maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
GV->setLinkage(Linkage);
// Get the typename global.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
@@ -551,7 +552,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
@@ -561,7 +562,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
if (OldGV && !OldGV->isDeclaration()) {
maybeUpdateRTTILinkage(CGM, OldGV, Ty);
- return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
}
// Check if there is already an external RTTI descriptor for this type.
@@ -582,8 +583,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// And the name.
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy));
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -705,7 +705,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
GV->setUnnamedAddr(true);
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
/// ComputeQualifierFlags - Compute the pointer type info flags from the
@@ -982,14 +982,11 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if (!ForEH && !getContext().getLangOptions().RTTI) {
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (!ForEH && !getContext().getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
- }
- if (ForEH && Ty->isObjCObjectPointerType() && !Features.NeXTRuntime) {
+ if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
return ObjCRuntime->GetEHType(Ty);
- }
return RTTIBuilder(*this).BuildTypeInfo(Ty);
}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 6475ccac0389..1193e974e195 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -120,24 +120,29 @@ private:
bool LayoutFields(const RecordDecl *D);
/// Layout a single base, virtual or non-virtual
- void LayoutBase(const CXXRecordDecl *base,
+ bool LayoutBase(const CXXRecordDecl *base,
const CGRecordLayout &baseLayout,
CharUnits baseOffset);
/// LayoutVirtualBase - layout a single virtual base.
- void LayoutVirtualBase(const CXXRecordDecl *base,
+ bool LayoutVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset);
/// LayoutVirtualBases - layout the virtual bases of a record decl.
- void LayoutVirtualBases(const CXXRecordDecl *RD,
+ bool LayoutVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
+
+ /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
+ /// like MSVC.
+ bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
/// LayoutNonVirtualBase - layout a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *base,
+ bool LayoutNonVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset);
/// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
- void LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
/// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
@@ -526,6 +531,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
CharUnits unionAlign = CharUnits::Zero();
bool hasOnlyZeroSizedBitFields = true;
+ bool checkedFirstFieldZeroInit = false;
unsigned fieldNo = 0;
for (RecordDecl::field_iterator field = D->field_begin(),
@@ -537,6 +543,11 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
if (!fieldType)
continue;
+ if (field->getDeclName() && !checkedFirstFieldZeroInit) {
+ CheckZeroInitializable(field->getType());
+ checkedFirstFieldZeroInit = true;
+ }
+
hasOnlyZeroSizedBitFields = false;
CharUnits fieldAlign = CharUnits::fromQuantity(
@@ -565,6 +576,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
}
}
if (unionAlign.isZero()) {
+ (void)hasOnlyZeroSizedBitFields;
assert(hasOnlyZeroSizedBitFields &&
"0-align record did not have all zero-sized bit-fields!");
unionAlign = CharUnits::One();
@@ -576,7 +588,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
AppendPadding(recordSize, unionAlign);
}
-void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
const CGRecordLayout &baseLayout,
CharUnits baseOffset) {
ResizeLastBaseFieldIfNecessary(baseOffset);
@@ -589,22 +601,18 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
LastLaidOutBase.Offset = NextFieldOffset;
LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
- // Fields and bases can be laid out in the tail padding of previous
- // bases. If this happens, we need to allocate the base as an i8
- // array; otherwise, we can use the subobject type. However,
- // actually doing that would require knowledge of what immediately
- // follows this base in the layout, so instead we do a conservative
- // approximation, which is to use the base subobject type if it
- // has the same LLVM storage size as the nvsize.
-
llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ if (getTypeAlignment(subobjectType) > Alignment)
+ return false;
+
AppendField(baseOffset, subobjectType);
+ return true;
}
-void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset) {
// Ignore empty bases.
- if (base->isEmpty()) return;
+ if (base->isEmpty()) return true;
const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
if (IsZeroInitializableAsBase) {
@@ -615,26 +623,51 @@ void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
baseLayout.isZeroInitializableAsBase();
}
- LayoutBase(base, baseLayout, baseOffset);
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
NonVirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
}
-void
+bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset) {
// Ignore empty bases.
- if (base->isEmpty()) return;
+ if (base->isEmpty()) return true;
const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
if (IsZeroInitializable)
IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
- LayoutBase(base, baseLayout, baseOffset);
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
VirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ if (!RD->getNumVBases())
+ return true;
+
+ // The vbases list is uniqued and ordered by a depth-first
+ // traversal, which is what we need here.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+ return true;
}
/// LayoutVirtualBases - layout the virtual bases of a record decl.
-void
+bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -650,7 +683,8 @@ CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
continue;
CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
- LayoutVirtualBase(BaseDecl, vbaseOffset);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
}
if (!BaseDecl->getNumVBases()) {
@@ -658,32 +692,39 @@ CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
continue;
}
- LayoutVirtualBases(BaseDecl, Layout);
+ if (!LayoutVirtualBases(BaseDecl, Layout))
+ return false;
}
+ return true;
}
-void
+bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout) {
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- // Check if we need to add a vtable pointer.
- if (RD->isDynamicClass()) {
- if (!PrimaryBase) {
- llvm::Type *FunctionType =
- llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
- /*isVarArg=*/true);
- llvm::Type *VTableTy = FunctionType->getPointerTo();
-
- assert(NextFieldOffset.isZero() &&
- "VTable pointer must come first!");
- AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
+ // If we have a primary base, lay it out first.
+ if (PrimaryBase) {
+ if (!Layout.isPrimaryBaseVirtual()) {
+ if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
} else {
- if (!Layout.isPrimaryBaseVirtual())
- LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
- else
- LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
+ if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
}
+
+ // Otherwise, add a vtable / vf-table if the layout says to do so.
+ } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
+ ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
+ : RD->isDynamicClass()) {
+ llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffset.isZero() &&
+ "VTable pointer must come first!");
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
}
// Layout the non-virtual bases.
@@ -699,8 +740,19 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
continue;
- LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
+ if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
+ return false;
+ }
+
+ // Add a vb-table pointer if the layout insists.
+ if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
+ CharUnits VBPtrOffset = Layout.getVBPtrOffset();
+ llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
+ AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
+ AppendField(VBPtrOffset, Vbptr);
}
+
+ return true;
}
bool
@@ -731,7 +783,6 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
FieldTypes.push_back(getByteArrayType(NumBytes));
}
-
BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
FieldTypes, "", Packed);
@@ -752,7 +803,8 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
if (RD)
- LayoutNonVirtualBases(RD, Layout);
+ if (!LayoutNonVirtualBases(RD, Layout))
+ return false;
unsigned FieldNo = 0;
const FieldDecl *LastFD = 0;
@@ -785,11 +837,19 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
return false;
}
- // And lay out the virtual bases.
- RD->getIndirectPrimaryBases(IndirectPrimaryBases);
- if (Layout.isPrimaryBaseVirtual())
- IndirectPrimaryBases.insert(Layout.getPrimaryBase());
- LayoutVirtualBases(RD, Layout);
+ // Lay out the virtual bases. The MS ABI uses a different
+ // algorithm here due to the lack of primary virtual bases.
+ if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+
+ if (!LayoutVirtualBases(RD, Layout))
+ return false;
+ } else {
+ if (!MSLayoutVirtualBases(RD, Layout))
+ return false;
+ }
}
// Append tail padding if necessary.
@@ -831,17 +891,24 @@ void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
assert(NextFieldOffset <= fieldOffset &&
"Incorrect field layout!");
- // Round up the field offset to the alignment of the field type.
- CharUnits alignedNextFieldOffset =
- NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ // Do nothing if we're already at the right offset.
+ if (fieldOffset == NextFieldOffset) return;
- if (alignedNextFieldOffset < fieldOffset) {
- // Even with alignment, the field offset is not at the right place,
- // insert padding.
- CharUnits padding = fieldOffset - NextFieldOffset;
+ // If we're not emitting a packed LLVM type, try to avoid adding
+ // unnecessary padding fields.
+ if (!Packed) {
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ assert(alignedNextFieldOffset <= fieldOffset);
- AppendBytes(padding);
+ // If that's the right offset, we're done.
+ if (alignedNextFieldOffset == fieldOffset) return;
}
+
+ // Otherwise we need explicit padding.
+ CharUnits padding = fieldOffset - NextFieldOffset;
+ AppendBytes(padding);
}
bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
@@ -905,7 +972,7 @@ void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
return;
// Can only have member pointers if we're compiling C++.
- if (!Types.getContext().getLangOptions().CPlusPlus)
+ if (!Types.getContext().getLangOpts().CPlusPlus)
return;
const Type *elementType = T->getBaseElementTypeUnsafe();
@@ -931,7 +998,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = 0;
- if (isa<CXXRecordDecl>(D)) {
+ if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
BaseTy = Builder.BaseSubobjectType;
if (!BaseTy) BaseTy = Ty;
}
@@ -950,7 +1017,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
RL->BitFields.swap(Builder.BitFields);
// Dump the layout, if requested.
- if (getContext().getLangOptions().DumpRecordLayouts) {
+ if (getContext().getLangOpts().DumpRecordLayouts) {
llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
llvm::errs() << "Record: ";
D->dump();
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index c56931bbc6fa..bf42dcb8e21d 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -73,6 +73,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::CXXCatchStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
+ case Stmt::MSDependentExistsStmtClass:
llvm_unreachable("invalid statement class to emit generically");
case Stmt::NullStmtClass:
case Stmt::CompoundStmtClass:
@@ -190,20 +191,13 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
- CGDebugInfo *DI = getDebugInfo();
- if (DI)
- DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
-
- // Keep track of the current cleanup stack depth.
- RunCleanupsScope Scope(*this);
+ // Keep track of the current cleanup stack depth, including debug scopes.
+ LexicalScope Scope(*this, S.getSourceRange());
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- if (DI)
- DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
-
RValue RV;
if (!GetLast)
RV = RValue::get(0);
@@ -771,7 +765,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(),
+ CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
@@ -783,7 +778,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
// As long as debug info is modeled with instructions, we have to ensure we
// have a place to insert here and write the stop point here.
- if (getDebugInfo() && HaveInsertPoint())
+ if (HaveInsertPoint())
EmitStopPoint(&S);
for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
@@ -876,6 +871,16 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // If there is no enclosing switch instance that we're aware of, then this
+ // case statement and its block can be elided. This situation only happens
+ // when we've constant-folded the switch, are emitting the constant case,
+ // and part of the constant case includes another case statement. For
+ // instance: switch (4) { case 4: do { case 5: } while (1); }
+ if (!SwitchInsn) {
+ EmitStmt(S.getSubStmt());
+ return;
+ }
+
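  // A compilable illustration (not from this patch) of the comment
  // above: the switch folds to 4, the 'case 4' body is emitted as
  // straight-line code, and the 'case 5' label inside the loop is
  // visited with SwitchInsn == 0, so only its sub-statement is emitted:
  //
  //   int f(void) {
  //     switch (4) {
  //     case 4:
  //       do {
  //     case 5: ;
  //       } while (0);
  //     }
  //     return 0;
  //   }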
// Handle case ranges.
if (S.getRHS()) {
EmitCaseStmtRange(S);
@@ -887,7 +892,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// If the body of the case is just a 'break', and if there was no fallthrough,
// try to not emit an empty block.
- if (isa<BreakStmt>(S.getSubStmt())) {
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
// Only do this optimization if there are no cleanups that need emitting.
@@ -1150,6 +1155,10 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
// See if we can constant fold the condition of the switch and therefore only
// emit the live case statement (if any) of the switch.
llvm::APInt ConstantCondValue;
@@ -1159,20 +1168,26 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
getContext())) {
RunCleanupsScope ExecutedScope(*this);
+ // At this point, we are no longer "within" a switch instance, so
+ // we can temporarily enforce this to ensure that any embedded case
+ // statements are not emitted.
+ SwitchInsn = 0;
+
// Okay, we can dead code eliminate everything except this case. Emit the
// specified series of statements and we're good.
for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
EmitStmt(CaseStmts[i]);
+
+ // Now we want to restore the saved switch instance so that nested
+ // switches continue to function properly.
+ SwitchInsn = SavedSwitchInsn;
+
return;
}
}
llvm::Value *CondV = EmitScalarExpr(S.getCond());
- // Handle nested switch statements.
- llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
- llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
-
// Create basic block to hold stuff that comes after switch
// statement. We also need to create a default block now so that
// explicit case ranges tests can have a place to jump to on
@@ -1199,7 +1214,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// Update the default block in case explicit case range tests have
// been chained on top.
- SwitchInsn->setSuccessor(0, CaseRangeBlock);
+ SwitchInsn->setDefaultDest(CaseRangeBlock);
// If a default was never emitted:
if (!DefaultBlock->getParent()) {
@@ -1280,6 +1295,8 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
if (!Variable)
return Constraint;
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint;
AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
if (!Attr)
return Constraint;
@@ -1355,7 +1372,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
- const LangOptions &LangOpts = CGF.CGM.getLangOptions();
+ const LangOptions &LangOpts = CGF.CGM.getLangOpts();
// Add the location of the start of each subsequent line of the asm to the
// MDNode.
@@ -1490,6 +1507,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
InOutConstraints);
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -1548,7 +1570,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
}
- if (llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
@@ -1590,7 +1612,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::Type *ResultType;
if (ResultRegTypes.empty())
- ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ ResultType = VoidTy;
else if (ResultRegTypes.size() == 1)
ResultType = ResultRegTypes[0];
else
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
deleted file mode 100644
index 0387daeb654f..000000000000
--- a/lib/CodeGen/CGTemporaries.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This contains code dealing with C++ code generation of temporaries
-//
-//===----------------------------------------------------------------------===//
-
-#include "CodeGenFunction.h"
-using namespace clang;
-using namespace CodeGen;
-
-namespace {
- struct DestroyTemporary : EHScopeStack::Cleanup {
- const CXXDestructorDecl *dtor;
- llvm::Value *addr;
- DestroyTemporary(const CXXDestructorDecl *dtor, llvm::Value *addr)
- : dtor(dtor), addr(addr) {}
- void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- addr);
- }
- };
-}
-
-/// Emits all the code to cause the given temporary to be cleaned up.
-void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
- llvm::Value *Ptr) {
- pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup,
- Temporary->getDestructor(),
- Ptr);
-}
-
-RValue
-CodeGenFunction::EmitExprWithCleanups(const ExprWithCleanups *E,
- AggValueSlot Slot) {
- RunCleanupsScope Scope(*this);
- return EmitAnyExpr(E->getSubExpr(), Slot);
-}
-
-LValue CodeGenFunction::EmitExprWithCleanupsLValue(const ExprWithCleanups *E) {
- RunCleanupsScope Scope(*this);
- return EmitLValue(E->getSubExpr());
-}
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index ea7b8cb49794..98be872a5525 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -18,13 +18,11 @@
using namespace clang;
using namespace CodeGen;
-#define D1(x)
-
-llvm::Constant *GetAddrOfVTTVTable(CodeGenVTables &CGVT,
- const CXXRecordDecl *MostDerivedClass,
- const VTTVTable &VTable,
- llvm::GlobalVariable::LinkageTypes Linkage,
- llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+static llvm::Constant *
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
if (VTable.getBase() == MostDerivedClass) {
assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
@@ -45,8 +43,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()),
- *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
@@ -102,7 +99,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
Out.flush();
@@ -113,10 +110,8 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
llvm::GlobalVariable *GV =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index a306c857e5aa..17a053757e2b 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -52,7 +52,7 @@ bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
// If we're building with optimization, we always emit VTables since that
// allows for virtual function calls to be devirtualized.
// (We don't want to do this in -fapple-kext mode however).
- if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOptions().AppleKext)
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
return true;
return KeyFunction->hasBody();
@@ -63,7 +63,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
// Compute the mangled name.
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
@@ -83,9 +83,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
- llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
-
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
if (NonVirtualAdjustment) {
@@ -244,8 +242,8 @@ void CodeGenFunction::GenerateVarArgsThunk(
QualType ResultType = FPT->getResultType();
// Get the original function
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(FnInfo, /*IsVariadic*/true);
+ assert(FnInfo.isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
@@ -331,6 +329,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
SourceLocation());
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ CXXThisValue = CXXABIThisValue;
// Adjust the 'this' pointer if necessary.
llvm::Value *AdjustedThisPtr =
@@ -352,13 +351,13 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
// Get our callee.
llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
- FPT->isVariadic());
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
#ifndef NDEBUG
const CGFunctionInfo &CallFnInfo =
- CGM.getTypes().getFunctionInfo(ResultType, CallArgs, FPT->getExtInfo());
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
@@ -399,7 +398,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
bool UseAvailableExternallyLinkage)
{
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(GD);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
// FIXME: re-use FnInfo in this computation.
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
@@ -511,7 +510,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
unsigned NumVTableThunks) {
SmallVector<llvm::Constant *, 64> Inits;
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -571,8 +570,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
// We have a pure virtual member function.
if (!PureVirtualFn) {
llvm::FunctionType *Ty =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
PureVirtualFn =
CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
@@ -586,8 +584,8 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
VTableThunks[NextVTableThunkIndex].first == I) {
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
- Init = CGM.GetAddrOfThunk(GD, Thunk);
MaybeEmitThunkAvailableExternally(GD, Thunk);
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
NextVTableThunkIndex++;
} else {
@@ -622,15 +620,14 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
if (ShouldEmitVTableInThisTU(RD))
CGM.DeferredVTables.push_back(RD);
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
Out.flush();
StringRef Name = OutName.str();
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy,
+ llvm::ArrayType::get(CGM.Int8PtrTy,
VTContext.getVTableLayout(RD).getNumVTableComponents());
VTable =
@@ -668,7 +665,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
bool BaseIsVirtual,
llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
- llvm::OwningPtr<VTableLayout> VTLayout(
+ OwningPtr<VTableLayout> VTLayout(
VTContext.createConstructionVTableLayout(Base.getBase(),
Base.getBaseOffset(),
BaseIsVirtual, RD));
@@ -677,7 +674,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
AddressPoints = VTLayout->getAddressPoints();
// Get the mangled construction vtable name.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().
mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
@@ -685,9 +682,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
Out.flush();
StringRef Name = OutName.str();
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, VTLayout->getNumVTableComponents());
+ llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 489e600b3ddf..ac704e7dca40 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -16,6 +16,7 @@
#define CLANG_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
namespace llvm {
@@ -24,9 +25,8 @@ namespace llvm {
}
namespace clang {
- class ObjCPropertyRefExpr;
-
namespace CodeGen {
+ class AggValueSlot;
class CGBitFieldInfo;
/// RValue - This trivial value class is used to represent the result of an
@@ -105,9 +105,7 @@ class LValue {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
- ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- PropertyRef // This is an Objective-C property reference, use
- // getPropertyRefExpr
+ ExtVectorElt // This is an extended vector subset, use getExtVectorComp
} LVType;
llvm::Value *V;
@@ -121,9 +119,6 @@ class LValue {
// BitField start bit and size
const CGBitFieldInfo *BitFieldInfo;
-
- // Obj-C property reference expression
- const ObjCPropertyRefExpr *PropertyRefExpr;
};
QualType Type;
@@ -131,7 +126,8 @@ class LValue {
// 'const' is unused here
Qualifiers Quals;
- /// The alignment to use when accessing this lvalue.
+ // The alignment to use when accessing this lvalue. (For vector elements,
+ // this is the alignment of the whole vector.)
unsigned short Alignment;
// objective-c's ivar
@@ -156,12 +152,14 @@ class LValue {
llvm::MDNode *TBAAInfo;
private:
- void Initialize(QualType Type, Qualifiers Quals, unsigned Alignment = 0,
+ void Initialize(QualType Type, Qualifiers Quals,
+ CharUnits Alignment = CharUnits(),
llvm::MDNode *TBAAInfo = 0) {
this->Type = Type;
this->Quals = Quals;
- this->Alignment = Alignment;
- assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
+ this->Alignment = Alignment.getQuantity();
+ assert(this->Alignment == Alignment.getQuantity() &&
+ "Alignment exceeds allowed max!");
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
@@ -175,7 +173,6 @@ public:
bool isVectorElt() const { return LVType == VectorElt; }
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
- bool isPropertyRef() const { return LVType == PropertyRef; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -226,7 +223,8 @@ public:
unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
- unsigned getAlignment() const { return Alignment; }
+ CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
+ void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
@@ -256,18 +254,8 @@ public:
return *BitFieldInfo;
}
- // property ref lvalue
- llvm::Value *getPropertyRefBaseAddr() const {
- assert(isPropertyRef());
- return V;
- }
- const ObjCPropertyRefExpr *getPropertyRefExpr() const {
- assert(isPropertyRef());
- return PropertyRefExpr;
- }
-
static LValue MakeAddr(llvm::Value *address, QualType type,
- unsigned alignment, ASTContext &Context,
+ CharUnits alignment, ASTContext &Context,
llvm::MDNode *TBAAInfo = 0) {
Qualifiers qs = type.getQualifiers();
qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
@@ -280,22 +268,22 @@ public:
}
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
@@ -316,17 +304,9 @@ public:
return R;
}
- // FIXME: It is probably bad that we aren't emitting the target when we build
- // the lvalue. However, this complicates the code a bit, and I haven't figured
- // out how to make it go wrong yet.
- static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
- llvm::Value *Base) {
- LValue R;
- R.LVType = PropertyRef;
- R.V = Base;
- R.PropertyRefExpr = E;
- R.Initialize(QualType(), Qualifiers());
- return R;
+ RValue asAggregateRValue() const {
+ // FIXME: Alignment
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -338,6 +318,8 @@ class AggValueSlot {
// Qualifiers
Qualifiers Quals;
+ unsigned short Alignment;
+
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
@@ -376,11 +358,8 @@ public:
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
- AggValueSlot AV;
- AV.Addr = 0;
- AV.Quals = Qualifiers();
- AV.DestructedFlag = AV.ObjCGCFlag = AV.ZeroedFlag = AV.AliasedFlag = false;
- return AV;
+ return forAddr(0, CharUnits(), Qualifiers(), IsNotDestructed,
+ DoesNotNeedGCBarriers, IsNotAliased);
}
/// forAddr - Make a slot for an aggregate value.
@@ -393,13 +372,15 @@ public:
/// for calling destructors on this object
/// \param needsGC - true if the slot is potentially located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *addr, Qualifiers quals,
+ static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ Qualifiers quals,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
AV.Addr = addr;
+ AV.Alignment = align.getQuantity();
AV.Quals = quals;
AV.DestructedFlag = isDestructed;
AV.ObjCGCFlag = needsGC;
@@ -412,8 +393,8 @@ public:
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
- return forAddr(LV.getAddress(), LV.getQuals(),
- isDestructed, needsGC, isAliased, isZeroed);
+ return forAddr(LV.getAddress(), LV.getAlignment(),
+ LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
}
IsDestructed_t isExternallyDestructed() const {
@@ -445,14 +426,19 @@ public:
return Addr == 0;
}
+ CharUnits getAlignment() const {
+ return CharUnits::fromQuantity(Alignment);
+ }
+
IsAliased_t isPotentiallyAliased() const {
return IsAliased_t(AliasedFlag);
}
+ // FIXME: Alignment?
RValue asRValue() const {
return RValue::getAggregate(getAddr(), isVolatile());
}
-
+
void setZeroed(bool V = true) { ZeroedFlag = V; }
IsZeroed_t isZeroed() const {
return IsZeroed_t(ZeroedFlag);
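Note on the CGValue.h hunks above: LValue and AggValueSlot now carry their
alignment as a CharUnits quantity instead of a raw unsigned, and the ObjC
property-ref lvalue kind is gone (property accesses go through the
pseudo-object paths added further below). A minimal sketch of the new slot
construction, using only names visible in these hunks (CGF, Addr, and T stand
in for a CodeGenFunction, a destination pointer, and a QualType):

  // Sketch only: building a slot with an explicit alignment.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
  AggValueSlot Slot =
      AggValueSlot::forAddr(Addr, Align, T.getQualifiers(),
                            AggValueSlot::IsNotDestructed,
                            AggValueSlot::DoesNotNeedGCBarriers,
                            AggValueSlot::IsNotAliased);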
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 5e674a8d0110..7b1dbce1a85e 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -4,6 +4,8 @@ set(LLVM_LINK_COMPONENTS
bitwriter
instrumentation
ipo
+ linker
+ vectorize
)
set(LLVM_USED_LIBS clangBasic clangAST clangFrontend)
@@ -37,7 +39,6 @@ add_clang_library(clangCodeGen
CGRecordLayoutBuilder.cpp
CGRTTI.cpp
CGStmt.cpp
- CGTemporaries.cpp
CGVTables.cpp
CGVTT.cpp
CodeGenAction.cpp
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 68dd5c94dc01..dd32167b8477 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTConsumer.h"
@@ -18,9 +19,12 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Linker.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/IRReader.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
@@ -30,6 +34,7 @@ using namespace llvm;
namespace clang {
class BackendConsumer : public ASTConsumer {
+ virtual void anchor();
DiagnosticsEngine &Diags;
BackendAction Action;
const CodeGenOptions &CodeGenOpts;
@@ -40,9 +45,9 @@ namespace clang {
Timer LLVMIRGeneration;
- llvm::OwningPtr<CodeGenerator> Gen;
+ OwningPtr<CodeGenerator> Gen;
- llvm::OwningPtr<llvm::Module> TheModule;
+ OwningPtr<llvm::Module> TheModule, LinkModule;
public:
BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
@@ -50,7 +55,9 @@ namespace clang {
const TargetOptions &targetopts,
const LangOptions &langopts,
bool TimePasses,
- const std::string &infile, raw_ostream *OS,
+ const std::string &infile,
+ llvm::Module *LinkModule,
+ raw_ostream *OS,
LLVMContext &C) :
Diags(_Diags),
Action(action),
@@ -59,11 +66,17 @@ namespace clang {
LangOpts(langopts),
AsmOutStream(OS),
LLVMIRGeneration("LLVM IR Generation Time"),
- Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
+ LinkModule(LinkModule) {
llvm::TimePassesIsEnabled = TimePasses;
}
llvm::Module *takeModule() { return TheModule.take(); }
+ llvm::Module *takeLinkModule() { return LinkModule.take(); }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+ }
virtual void Initialize(ASTContext &Ctx) {
Context = &Ctx;
@@ -79,7 +92,7 @@ namespace clang {
LLVMIRGeneration.stopTimer();
}
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
Context->getSourceManager(),
"LLVM IR generation of declaration");
@@ -91,6 +104,8 @@ namespace clang {
if (llvm::TimePassesIsEnabled)
LLVMIRGeneration.stopTimer();
+
+ return true;
}
virtual void HandleTranslationUnit(ASTContext &C) {
@@ -111,7 +126,7 @@ namespace clang {
// Make sure IR generation is happy with the module. This is released by
// the module provider.
- Module *M = Gen->ReleaseModule();
+ llvm::Module *M = Gen->ReleaseModule();
if (!M) {
// The module has been released by IR gen on failures, do not double
// free.
@@ -122,6 +137,17 @@ namespace clang {
assert(TheModule.get() == M &&
"Unexpected module change during IR generation");
+ // Link LinkModule into this module if present, preserving its validity.
+ if (LinkModule) {
+ std::string ErrorMsg;
+ if (Linker::LinkModules(M, LinkModule.get(), Linker::PreserveSource,
+ &ErrorMsg)) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << LinkModule->getModuleIdentifier() << ErrorMsg;
+ return;
+ }
+ }
+
// Install an inline asm handler so that diagnostics get printed through
// our diagnostics hooks.
LLVMContext &Ctx = TheModule->getContext();
@@ -160,6 +186,8 @@ namespace clang {
void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
SourceLocation LocCookie);
};
+
+ void BackendConsumer::anchor() {}
}
/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
@@ -215,8 +243,17 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
if (LocCookie.isValid()) {
Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
- if (D.getLoc().isValid())
- Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) {
+ std::pair<unsigned, unsigned> Range = D.getRanges()[i];
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
return;
}
@@ -229,7 +266,8 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
//
CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
- : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ : Act(_Act), LinkModule(0),
+ VMContext(_VMContext ? _VMContext : new LLVMContext),
OwnsVMContext(!_VMContext) {}
CodeGenAction::~CodeGenAction() {
@@ -245,6 +283,10 @@ void CodeGenAction::EndSourceFileAction() {
if (!getCompilerInstance().hasASTConsumer())
return;
+ // If we were given a link module, release consumer's ownership of it.
+ if (LinkModule)
+ BEConsumer->takeLinkModule();
+
// Steal the module from the consumer.
TheModule.reset(BEConsumer->takeModule());
}
@@ -281,16 +323,40 @@ static raw_ostream *GetOutputStream(CompilerInstance &CI,
ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
BackendAction BA = static_cast<BackendAction>(Act);
- llvm::OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
if (BA != Backend_EmitNothing && !OS)
return 0;
+ llvm::Module *LinkModuleToUse = LinkModule;
+
+ // If we were not given a link module, and the user requested that one be
+ // loaded from bitcode, do so now.
+ const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
+ if (!LinkModuleToUse && !LinkBCFile.empty()) {
+ std::string ErrorStr;
+
+ llvm::MemoryBuffer *BCBuf =
+ CI.getFileManager().getBufferForFile(LinkBCFile, &ErrorStr);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+
+ LinkModuleToUse = getLazyBitcodeModule(BCBuf, *VMContext, &ErrorStr);
+ if (!LinkModuleToUse) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+ }
+
BEConsumer =
new BackendConsumer(BA, CI.getDiagnostics(),
CI.getCodeGenOpts(), CI.getTargetOpts(),
CI.getLangOpts(),
- CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
- *VMContext);
+ CI.getFrontendOpts().ShowTimers, InFile,
+ LinkModuleToUse, OS.take(), *VMContext);
return BEConsumer;
}
@@ -328,8 +394,17 @@ void CodeGenAction::ExecuteAction() {
StringRef Msg = Err.getMessage();
if (Msg.startswith("error: "))
Msg = Msg.substr(7);
+
+ // Escape '%', which is interpreted as a format character.
+ llvm::SmallString<128> EscapedMessage;
+ for (unsigned i = 0, e = Msg.size(); i != e; ++i) {
+ if (Msg[i] == '%')
+ EscapedMessage += '%';
+ EscapedMessage += Msg[i];
+ }
+
unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
- DiagnosticsEngine::Error, Msg);
+ DiagnosticsEngine::Error, EscapedMessage);
CI.getDiagnostics().Report(Loc, DiagID);
return;
@@ -348,20 +423,26 @@ void CodeGenAction::ExecuteAction() {
//
+void EmitAssemblyAction::anchor() { }
EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitAssembly, _VMContext) {}
+void EmitBCAction::anchor() { }
EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitBC, _VMContext) {}
+void EmitLLVMAction::anchor() { }
EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitLL, _VMContext) {}
+void EmitLLVMOnlyAction::anchor() { }
EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitNothing, _VMContext) {}
+void EmitCodeGenOnlyAction::anchor() { }
EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitMCNull, _VMContext) {}
+void EmitObjAction::anchor() { }
EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitObj, _VMContext) {}
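The LinkModule plumbing above can be fed either programmatically (a caller
hands CodeGenAction a module up front, and EndSourceFileAction releases the
consumer's claim on it via takeLinkModule) or from
CodeGenOpts.LinkBitcodeFile, which is lazily loaded into the same
LLVMContext. A sketch of the latter, assuming the option is exposed as the
cc1 flag -mlink-bitcode-file (the flag spelling is not part of this hunk):

  clang -cc1 -emit-llvm -mlink-bitcode-file extra.bc -o out.ll input.c

Linker::PreserveSource keeps the linked-in module intact, which is what makes
handing it back to an external owner safe.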
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 8191f021da4a..06e90b664858 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -16,9 +16,7 @@
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
-#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -31,20 +29,29 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
- Target(CGM.getContext().getTargetInfo()), Builder(cgm.getModule().getContext()),
+ Target(CGM.getContext().getTargetInfo()),
+ Builder(cgm.getModule().getContext()),
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
- NormalCleanupDest(0), NextCleanupDestIndex(1),
- EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
+ LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
+ FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
- CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
- TrapBB(0) {
+ CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
+ CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
+ TerminateHandler(0), TrapBB(0) {
- CatchUndefined = getContext().getLangOptions().CatchUndefined;
+ CatchUndefined = getContext().getLangOpts().CatchUndefined;
CGM.getCXXABI().getMangleContext().startNewFunction();
}
+CodeGenFunction::~CodeGenFunction() {
+ // If there are any unclaimed block infos, go ahead and destroy them
+ // now. This can happen if IR-gen gets clever and skips evaluating
+ // something.
+ if (FirstBlockInfo)
+ destroyBlockInfos(FirstBlockInfo);
+}
+
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -222,8 +229,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
llvm::PointerType *PointerTy = Int8PtrTy;
llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
llvm::FunctionType *FunctionTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- ProfileFuncArgs, false);
+ llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
@@ -237,8 +243,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
}
void CodeGenFunction::EmitMCountInstrumentation() {
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
Target.getMCountName());
@@ -261,15 +266,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Pass inline keyword to optimizer if it appears explicitly on any
// declaration.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
- RE = FD->redecls_end(); RI != RE; ++RI)
- if (RI->isInlineSpecified()) {
- Fn->addFnAttr(llvm::Attribute::InlineHint);
- break;
- }
+ if (!CGM.getCodeGenOpts().NoInline)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
- if (getContext().getLangOptions().OpenCL) {
+ if (getContext().getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
if (FD->hasAttr<OpenCLKernelAttr>()) {
@@ -298,12 +304,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
- // FIXME: what is going on here and why does it ignore all these
- // interesting type properties?
+ unsigned NumArgs = 0;
+ QualType *ArgsArray = new QualType[Args.size()];
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ ArgsArray[NumArgs++] = (*i)->getType();
+ }
+
QualType FnType =
- getContext().getFunctionType(RetTy, 0, 0,
+ getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
FunctionProtoType::ExtProtoInfo());
+ delete[] ArgsArray;
+
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
@@ -328,7 +341,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Tell the epilog emitter to autorelease the result. We do this
// now so that various specialized functions can suppress it
// during their IR-generation.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
!CurFnInfo->isReturnsRetained() &&
RetTy->isObjCRetainableType())
AutoreleaseResult = true;
@@ -339,8 +352,27 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ // We're in a lambda; figure out the captures.
+ MD->getParent()->getCaptureFields(LambdaCaptureFields,
+ LambdaThisCaptureField);
+ if (LambdaThisCaptureField) {
+ // If this lambda captures this, load it.
+ LValue ThisLValue = EmitLValueForField(CXXABIThisValue,
+ LambdaThisCaptureField, 0);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
+ }
+ } else {
+ // Not in a lambda; just use 'this' from the method.
+ // FIXME: Should we generate a new load for each use of 'this'? The
+ // fast register allocator would be happier...
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
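The lambda-aware prologue above separates the ABI 'this' (CXXABIThisValue,
the closure object itself) from the user-visible 'this' (CXXThisValue, loaded
from the capture field when the lambda captures it). An illustrative input,
not taken from the patch:

  struct S {
    int x;
    int f() { return [this] { return x; }(); }
    // In the lambda's operator(), CXXABIThisValue is the closure and
    // CXXThisValue is loaded through LambdaThisCaptureField.
  };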
@@ -397,9 +429,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
- if (FD->getNumParams())
- for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
- Args.push_back(FD->getParamDecl(i));
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(FD->getParamDecl(i));
SourceRange BodyRange;
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
@@ -412,10 +443,21 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
- else if (getContext().getLangOptions().CUDA &&
+ else if (getContext().getLangOpts().CUDA &&
!CGM.getCodeGenOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
+ else if (isa<CXXConversionDecl>(FD) &&
+ cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
+ // The lambda conversion to block pointer is special; the semantics can't be
+ // expressed in the AST, so IRGen needs to special-case it.
+ EmitLambdaToBlockPointerBody(Args);
+ } else if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+ // The lambda "__invoke" function is special, because it forwards or
+ // clones the body of the function call operator (but is actually static).
+ EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+ }
else
EmitFunctionBody(Args);
@@ -505,15 +547,14 @@ bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
- Expr::EvalResult Result;
- if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
- Result.HasSideEffects)
+ llvm::APSInt Int;
+ if (!Cond->EvaluateAsInt(Int, getContext()))
return false; // Not foldable, not integer or not fully evaluatable.
-
+
if (CodeGenFunction::ContainsLabel(Cond))
return false; // Contains a label.
-
- ResultInt = Result.Val.getInt();
+
+ ResultInt = Int;
return true;
}
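Illustrative conditions for the simplified fold (sketch only):

  if (sizeof(long) == 8) { ... }  // folds; only the live arm is emitted
  if (n) { ... }                  // 'n' is not constant; runtime branch
  // Conditions containing a label are still rejected, so a folded-away
  // arm can never swallow a jump target.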
@@ -606,29 +647,24 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
}
if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
- // Handle ?: operator.
-
- // Just ignore GNU ?: extension.
- if (CondOp->getLHS()) {
- // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
- llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
- ConditionalEvaluation cond(*this);
- EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ ConditionalEvaluation cond(*this);
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
- cond.begin(*this);
- EmitBlock(LHSBlock);
- EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
- cond.end(*this);
+ cond.begin(*this);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
- cond.begin(*this);
- EmitBlock(RHSBlock);
- EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
- cond.end(*this);
+ cond.begin(*this);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
- return;
- }
+ return;
}
// Emit the code with the fully general case.
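The null-LHS guard disappears above because a GNU binary conditional
(cond ?: rhs) is no longer modeled as a ConditionalOperator with a missing
LHS; it arrives as a BinaryConditionalOperator wrapping an OpaqueValueExpr
(see the OpaqueValueMapping changes in CodeGenFunction.h below), so getLHS()
is always non-null here. For example:

  int r = p ?: q;  // BinaryConditionalOperator, not a null-LHS conditional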
@@ -696,7 +732,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
- if (getContext().getLangOptions().CPlusPlus) {
+ if (getContext().getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
return;
@@ -916,19 +952,19 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// We're going to walk down into the type and look for VLA
// expressions.
- type = type.getCanonicalType();
do {
assert(type->isVariablyModifiedType());
const Type *ty = type.getTypePtr();
switch (ty->getTypeClass()) {
+
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
-#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
-#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
- llvm_unreachable("unexpected dependent or non-canonical type!");
+ llvm_unreachable("unexpected dependent type!");
// These types are never variably-modified.
case Type::Builtin:
@@ -937,6 +973,8 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::ExtVector:
case Type::Record:
case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
@@ -986,11 +1024,31 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
break;
}
- case Type::FunctionProto:
+ case Type::FunctionProto:
case Type::FunctionNoProto:
type = cast<FunctionType>(ty)->getResultType();
break;
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
case Type::Atomic:
type = cast<AtomicType>(ty)->getValueType();
break;
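Illustrative C inputs for the desugaring walk added above (sketch only):

  void g(int n) {
    int (*p)[n];       // Paren: desugar one step, then emit the VLA size 'n'
    typedef int T[n];  // the size of T is evaluated here, at the typedef
    T x;               // Typedef: stop walking, nothing left to emit
  }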
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 157623da8fdd..3e0cd146256f 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -25,8 +25,10 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Debug.h"
#include "CodeGenModule.h"
#include "CGBuilder.h"
+#include "CGDebugInfo.h"
#include "CGValue.h"
namespace llvm {
@@ -41,8 +43,8 @@ namespace llvm {
}
namespace clang {
- class APValue;
class ASTContext;
+ class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
@@ -69,7 +71,6 @@ namespace clang {
namespace CodeGen {
class CodeGenTypes;
- class CGDebugInfo;
class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
@@ -598,6 +599,9 @@ public:
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -609,6 +613,9 @@ public:
unsigned NextCleanupDestIndex;
+ /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
+ CGBlockInfo *FirstBlockInfo;
+
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock;
@@ -625,10 +632,6 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
- /// Set up the last cleaup that was pushed as a conditional
- /// full-expression cleanup.
- void initFullExprCleanup();
-
template <class T>
typename DominatingValue<T>::saved_type saveValueInCond(T value) {
return DominatingValue<T>::save(*this, value);
@@ -739,6 +742,10 @@ public:
initFullExprCleanup();
}
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -758,16 +765,27 @@ public:
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
/// stack.
- void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// the point at which the cleanup comes into scope.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
/// ActivateCleanupBlock - Activates an initially-inactive cleanup.
/// Cannot be used to resurrect a deactivated cleanup.
- void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// the point at which the cleanup comes into scope.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- CodeGenFunction& CGF;
EHScopeStack::stable_iterator CleanupStackDepth;
bool OldDidCallStackSave;
bool PerformCleanup;
@@ -775,10 +793,13 @@ public:
RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ protected:
+ CodeGenFunction& CGF;
+
public:
/// \brief Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
- : CGF(CGF), PerformCleanup(true)
+ : PerformCleanup(true), CGF(CGF)
{
CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
@@ -809,6 +830,41 @@ public:
}
};
+ class LexicalScope: protected RunCleanupsScope {
+ SourceRange Range;
+ bool PopDebugStack;
+
+ LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
+ LexicalScope &operator=(const LexicalScope &);
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
+ : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~LexicalScope() {
+ if (PopDebugStack) {
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ }
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ if (CGDebugInfo *DI = CGF.getDebugInfo()) {
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ PopDebugStack = false;
+ }
+ }
+ };
+
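LexicalScope ties cleanup emission to the debug lexical-block markers. A
minimal usage sketch (the statement emitters themselves are outside this
hunk):

  void CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S) {
    LexicalScope Scope(*this, S.getSourceRange());
    // EmitLexicalBlockStart ran in the constructor; cleanups and
    // EmitLexicalBlockEnd run when Scope is destroyed or ForceCleanup'd.
  }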
/// PopCleanupBlocks - Takes the old cleanup stack size and emits
/// the cleanup blocks that have been added.
@@ -881,6 +937,12 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != 0; }
+ void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ assert(isInConditionalBranch());
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(value, addr, &block->back());
+ }
+
/// An RAII object to record that we're evaluating a statement
/// expression.
class StmtExprEvaluation {
@@ -912,18 +974,91 @@ public:
public:
PeepholeProtection() : Inst(0) {}
- };
+ };
- /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
- class OpaqueValueMapping {
- CodeGenFunction &CGF;
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
const OpaqueValueExpr *OpaqueValue;
bool BoundLValue;
CodeGenFunction::PeepholeProtection Protection;
+ OpaqueValueMappingData(const OpaqueValueExpr *ov,
+ bool boundLValue)
+ : OpaqueValue(ov), BoundLValue(boundLValue) {}
+ public:
+ OpaqueValueMappingData() : OpaqueValue(0) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() ||
+ expr->getType()->isRecordType() ||
+ expr->getType()->isFunctionType();
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(CGF, ov, CGF.EmitLValue(e));
+ return bind(CGF, ov, CGF.EmitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ data.Protection = CGF.protectFromPeepholes(rv);
+
+ return data;
+ }
+
+ bool isValid() const { return OpaqueValue != 0; }
+ void clear() { OpaqueValue = 0; }
+
+ void unbind(CodeGenFunction &CGF) {
+ assert(OpaqueValue && "no data to unbind!");
+
+ if (BoundLValue) {
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ } else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ OpaqueValueMappingData Data;
+
public:
static bool shouldBindAsLValue(const Expr *expr) {
- return expr->isGLValue() || expr->getType()->isRecordType();
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
}
/// Build the opaque value mapping for the given conditional
@@ -933,75 +1068,34 @@ public:
///
OpaqueValueMapping(CodeGenFunction &CGF,
const AbstractConditionalOperator *op) : CGF(CGF) {
- if (isa<ConditionalOperator>(op)) {
- OpaqueValue = 0;
- BoundLValue = false;
+ if (isa<ConditionalOperator>(op))
+ // Leave Data empty.
return;
- }
const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
- init(e->getOpaqueValue(), e->getCommon());
+ Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
+ e->getCommon());
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
LValue lvalue)
- : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
- assert(opaqueValue && "no opaque value expression!");
- assert(shouldBindAsLValue(opaqueValue));
- initLValue(lvalue);
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
RValue rvalue)
- : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
- assert(opaqueValue && "no opaque value expression!");
- assert(!shouldBindAsLValue(opaqueValue));
- initRValue(rvalue);
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
}
void pop() {
- assert(OpaqueValue && "mapping already popped!");
- popImpl();
- OpaqueValue = 0;
+ Data.unbind(CGF);
+ Data.clear();
}
~OpaqueValueMapping() {
- if (OpaqueValue) popImpl();
- }
-
- private:
- void popImpl() {
- if (BoundLValue)
- CGF.OpaqueLValues.erase(OpaqueValue);
- else {
- CGF.OpaqueRValues.erase(OpaqueValue);
- CGF.unprotectFromPeepholes(Protection);
- }
- }
-
- void init(const OpaqueValueExpr *ov, const Expr *e) {
- OpaqueValue = ov;
- BoundLValue = shouldBindAsLValue(ov);
- assert(BoundLValue == shouldBindAsLValue(e)
- && "inconsistent expression value kinds!");
- if (BoundLValue)
- initLValue(CGF.EmitLValue(e));
- else
- initRValue(CGF.EmitAnyExpr(e));
- }
-
- void initLValue(const LValue &lv) {
- CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
- }
-
- void initRValue(const RValue &rv) {
- // Work around an extremely aggressive peephole optimization in
- // EmitScalarConversion which assumes that all other uses of a
- // value are extant.
- Protection = CGF.protectFromPeepholes(rv);
- CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
+ if (Data.isValid()) Data.unbind(CGF);
}
};
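Splitting the data out of the RAII wrapper lets a caller bind a variable
number of opaque values at once, which is what the pseudo-object entry points
added below need. A sketch of the non-RAII pattern, assuming each bound
OpaqueValueExpr 'ov' carries its source expression:

  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
  // ... one bind per opaque value:
  opaques.push_back(CodeGenFunction::OpaqueValueMappingData::bind(
      CGF, ov, ov->getSourceExpr()));
  RValue result = CGF.EmitAnyExpr(resultExpr);
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);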
@@ -1046,7 +1140,7 @@ private:
};
SmallVector<BreakContinue, 8> BreakContinueStack;
- /// SwitchInsn - This is nearest current switch instruction. It is null if if
+ /// SwitchInsn - This is nearest current switch instruction. It is null if
/// current context is not in a switch.
llvm::SwitchInst *SwitchInsn;
@@ -1073,7 +1167,8 @@ private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
- ImplicitParamDecl *CXXThisDecl;
+ ImplicitParamDecl *CXXABIThisDecl;
+ llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
/// CXXVTTDecl - When generating code for a base object constructor or
@@ -1099,6 +1194,7 @@ private:
public:
CodeGenFunction(CodeGenModule &cgm);
+ ~CodeGenFunction();
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const { return CGM.getContext(); }
@@ -1114,7 +1210,7 @@ public:
return CGM.getCodeGenOpts().OptimizationLevel == 0;
}
- const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
+ const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
/// Returns a pointer to the function's exception object and selector slot,
/// which is assigned in every landing pad.
@@ -1152,27 +1248,27 @@ public:
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
- Destroyer &destroyer);
+ Destroyer *destroyer);
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
- Destroyer &destroyer);
+ Destroyer *destroyer);
void pushDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type);
void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
- Destroyer &destroyer, bool useEHCleanupForArray);
- void emitDestroy(llvm::Value *addr, QualType type, Destroyer &destroyer,
+ Destroyer *destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(llvm::Constant *addr,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray);
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
- QualType type, Destroyer &destroyer,
+ QualType type, Destroyer *destroyer,
bool checkZeroLength, bool useEHCleanup);
- Destroyer &getDestroyer(QualType::DestructionKind destructionKind);
+ Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
/// Determines whether an EH cleanup is required to destroy a type
/// with the given destruction kind.
@@ -1182,9 +1278,9 @@ public:
return false;
case QualType::DK_cxx_destructor:
case QualType::DK_objc_weak_lifetime:
- return getLangOptions().Exceptions;
+ return getLangOpts().Exceptions;
case QualType::DK_objc_strong_lifetime:
- return getLangOptions().Exceptions &&
+ return getLangOpts().Exceptions &&
CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
}
llvm_unreachable("bad destruction kind");
@@ -1208,7 +1304,8 @@ public:
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl);
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
@@ -1218,7 +1315,8 @@ public:
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl);
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
bool IvarTypeWithAggrGCObjects(QualType Ty);
@@ -1227,6 +1325,8 @@ public:
//===--------------------------------------------------------------------===//
llvm::Value *EmitBlockLiteral(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+ static void destroyBlockInfos(CGBlockInfo *info);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
llvm::StructType *,
@@ -1235,10 +1335,16 @@ public:
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- const DeclMapTy &ldm);
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock);
llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
@@ -1253,10 +1359,7 @@ public:
}
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
- void AllocateBlockDecl(const BlockDeclRefExpr *E);
- llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
- return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
- }
+ void AllocateBlockDecl(const DeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
llvm::Type *BuildByRefType(const VarDecl *var);
@@ -1272,6 +1375,13 @@ public:
void EmitDestructorBody(FunctionArgList &Args);
void EmitFunctionBody(FunctionArgList &Args);
+ void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs);
+ void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
+ void EmitLambdaBlockInvokeBody();
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+
/// EmitReturnBlock - Emit the unified return block, trying to avoid its
/// emission when possible.
void EmitReturnBlock();
@@ -1290,6 +1400,9 @@ public:
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes);
+
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
@@ -1442,7 +1555,15 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
- LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
+ LValue MakeAddrLValue(llvm::Value *V, QualType T,
+ CharUnits Alignment = CharUnits()) {
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ CharUnits Alignment;
+ if (!T->isIncompleteType())
+ Alignment = getContext().getTypeAlignInChars(T);
return LValue::MakeAddr(V, T, Alignment, getContext(),
CGM.getTBAAInfo(T));
}
@@ -1470,7 +1591,9 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
+ CharUnits Alignment = getContext().getTypeAlignInChars(T);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1519,7 +1642,8 @@ public:
/// \param isVolatile - True iff either the source or the destination is
/// volatile.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- QualType EltTy, bool isVolatile=false);
+ QualType EltTy, bool isVolatile=false,
+ unsigned Alignment = 0);
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
@@ -1677,7 +1801,8 @@ public:
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
llvm::Value *NewPtr, llvm::Value *NumElements);
- void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+ void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+ llvm::Value *Ptr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -1688,6 +1813,10 @@ public:
llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+ void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
+ void EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init);
+
void EmitCheck(llvm::Value *, unsigned Size);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
@@ -1917,13 +2046,14 @@ public:
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo = 0);
+ llvm::MDNode *TBAAInfo = 0, bool isInit=false);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation. The l-value must be a simple
- /// l-value.
- void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
@@ -1931,15 +2061,12 @@ public:
RValue EmitLoadOfLValue(LValue V);
RValue EmitLoadOfExtVectorElementLValue(LValue V);
RValue EmitLoadOfBitfieldLValue(LValue LV);
- RValue EmitLoadOfPropertyRefLValue(LValue LV,
- ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
- void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
/// EmitStoreThroughLValue.
@@ -1977,6 +2104,40 @@ public:
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+ class ConstantEmission {
+ llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
+ ConstantEmission(llvm::Constant *C, bool isReference)
+ : ValueAndIsReference(C, isReference) {}
+ public:
+ ConstantEmission() {}
+ static ConstantEmission forReference(llvm::Constant *C) {
+ return ConstantEmission(C, true);
+ }
+ static ConstantEmission forValue(llvm::Constant *C) {
+ return ConstantEmission(C, false);
+ }
+
+ operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
+
+ bool isReference() const { return ValueAndIsReference.getInt(); }
+ LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
+ assert(isReference());
+ return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+ refExpr->getType());
+ }
+
+ llvm::Constant *getValue() const {
+ assert(!isReference());
+ return ValueAndIsReference.getPointer();
+ }
+ };
+
+ ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+
+ RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
+ AggValueSlot slot = AggValueSlot::ignored());
+ LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
+
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForAnonRecordField(llvm::Value* Base,
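ConstantEmission packs "did it fold" and "is it a reference" into a single
pointer-sized value. The expected call pattern looks like this (a sketch; the
actual caller lives in CGExpr.cpp, outside this diff):

  if (CodeGenFunction::ConstantEmission C = CGF.tryEmitAsConstant(refExpr)) {
    if (C.isReference())
      return CGF.EmitLoadOfLValue(C.getReferenceLValue(CGF, refExpr));
    return RValue::get(C.getValue());
  }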
@@ -1999,16 +2160,13 @@ public:
LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
unsigned CVRQualifiers);
- LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
-
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
- LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
+ LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
- LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
@@ -2098,12 +2256,18 @@ public:
llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
- llvm::Value *BuildVector(const SmallVectorImpl<llvm::Value*> &Ops);
+ llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
+ llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
+ llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects);
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
@@ -2243,7 +2407,8 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ bool PerformInit);
/// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
/// with the C++ runtime so that its destructor will be called at exit.
@@ -2255,7 +2420,8 @@ public:
/// possible to prove that an initialization will be done exactly
/// once, e.g. with a static local variable or a static data member
/// of a class template.
- void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
@@ -2263,26 +2429,32 @@ public:
llvm::Constant **Decls,
unsigned NumDecls);
- /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
+ /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
- void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::WeakVH,
- llvm::Constant*> > &DtorsAndObjects);
+ void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
- llvm::GlobalVariable *Addr);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
const Expr *Exp);
- RValue EmitExprWithCleanups(const ExprWithCleanups *E,
- AggValueSlot Slot =AggValueSlot::ignored());
+ void enterFullExpression(const ExprWithCleanups *E) {
+ if (E->getNumObjects() == 0) return;
+ enterNonTrivialFullExpression(E);
+ }
+ void enterNonTrivialFullExpression(const ExprWithCleanups *E);
void EmitCXXThrowExpr(const CXXThrowExpr *E);
+ void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
+
RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
//===--------------------------------------------------------------------===//
@@ -2344,7 +2516,12 @@ public:
/// a r-value suitable for passing the given parameter.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
+ /// SetFPAccuracy - Set the minimum required accuracy of the given floating
+ /// point operation, expressed as the maximum relative error in ulp.
+ void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+
private:
+ llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
@@ -2432,6 +2609,18 @@ private:
CodeGenModule::ByrefHelpers *
buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission);
+
+ void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
+
+ /// GetPointeeAlignment - Given an expression with a pointer type, find the
+ /// alignment of the type referenced by the pointer. Skip over implicit
+ /// casts.
+ unsigned GetPointeeAlignment(const Expr *Addr);
+
+ /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+ /// the alignment of the type referenced by the pointer. Skip over implicit
+ /// casts. Return the alignment as an llvm::Value.
+ llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
};
/// Helper class with most of the code for saving a value for a
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 275045cff84c..c0ccf4de4c9f 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -38,6 +39,7 @@
#include "llvm/Module.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
@@ -56,35 +58,53 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
}
llvm_unreachable("invalid C++ ABI kind");
- return *CreateItaniumCXXABI(CGM);
}
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
DiagnosticsEngine &diags)
- : Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
- Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI, CGO),
+ Types(*this),
TBAA(0),
VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
- DebugInfo(0), ARCData(0), RRData(0), CFConstantStringClassRef(0),
+ DebugInfo(0), ARCData(0), NoObjCARCExceptionsMetadata(0),
+ RRData(0), CFConstantStringClassRef(0),
ConstantStringClassRef(0), NSConstantStringType(0),
VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
BlockObjectAssign(0), BlockObjectDispose(0),
BlockDescriptorType(0), GenericBlockLiteralType(0) {
- if (Features.ObjC1)
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ FloatTy = llvm::Type::getFloatTy(LLVMContext);
+ DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+
+ if (LangOpts.ObjC1)
createObjCRuntime();
- if (Features.OpenCL)
+ if (LangOpts.OpenCL)
createOpenCLRuntime();
- if (Features.CUDA)
+ if (LangOpts.CUDA)
createCUDARuntime();
// Enable TBAA unless it's suppressed.
if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
- TBAA = new CodeGenTBAA(Context, VMContext, getLangOptions(),
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
@@ -95,23 +115,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Block.GlobalUniqueCount = 0;
- if (C.getLangOptions().ObjCAutoRefCount)
+ if (C.getLangOpts().ObjCAutoRefCount)
ARCData = new ARCEntrypoints();
RRData = new RREntrypoints();
-
- // Initialize the type cache.
- llvm::LLVMContext &LLVMContext = M.getContext();
- VoidTy = llvm::Type::getVoidTy(LLVMContext);
- Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
- Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
- Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
- PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
- PointerAlignInBytes =
- C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
- IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
- IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
- Int8PtrTy = Int8Ty->getPointerTo(0);
- Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
}
CodeGenModule::~CodeGenModule() {
@@ -127,7 +133,7 @@ CodeGenModule::~CodeGenModule() {
}
void CodeGenModule::createObjCRuntime() {
- if (!Features.NeXTRuntime)
+ if (!LangOpts.NeXTRuntime)
ObjCRuntime = CreateGNUObjCRuntime(*this);
else
ObjCRuntime = CreateMacObjCRuntime(*this);
@@ -168,8 +174,6 @@ void CodeGenModule::Release() {
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
- if (DebugInfo)
- DebugInfo->UpdateCompletedType(TD);
}
llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
@@ -178,6 +182,12 @@ llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
return TBAA->getTBAAInfo(QTy);
}
+llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfoForVTablePtr();
+}
+
void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo) {
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
@@ -292,7 +302,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// If there's a key function, there may be translation units
// that don't have the key function's definition. But ignore
// this if we're emitting RTTI under -fno-rtti.
- if (!(TVK != TVK_ForRTTI) || Features.RTTI) {
+ if (!(TVK != TVK_ForRTTI) || LangOpts.RTTI) {
if (Context.getKeyFunction(RD))
return;
}
@@ -317,7 +327,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
return Str;
}
- llvm::SmallString<256> Buffer;
+ SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
@@ -379,16 +389,15 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Get the type of a ctor entry, { i32, void ()* }.
llvm::StructType *CtorStructTy =
- llvm::StructType::get(llvm::Type::getInt32Ty(VMContext),
- llvm::PointerType::getUnqual(CtorFTy), NULL);
+ llvm::StructType::get(Int32Ty, llvm::PointerType::getUnqual(CtorFTy), NULL);
// Construct the constructor and destructor arrays.
- std::vector<llvm::Constant*> Ctors;
+ SmallVector<llvm::Constant*, 8> Ctors;
for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
- std::vector<llvm::Constant*> S;
- S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- I->second, false));
- S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ llvm::Constant *S[] = {
+ llvm::ConstantInt::get(Int32Ty, I->second, false),
+ llvm::ConstantExpr::getBitCast(I->first, CtorPFTy)
+ };
Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
}
@@ -431,7 +440,7 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
- return !Context.getLangOptions().AppleKext
+ return !Context.getLangOpts().AppleKext
? llvm::Function::LinkOnceODRLinkage
: llvm::Function::InternalLinkage;
@@ -440,7 +449,7 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
if (Linkage == GVA_ExplicitTemplateInstantiation)
- return !Context.getLangOptions().AppleKext
+ return !Context.getLangOpts().AppleKext
? llvm::Function::WeakODRLinkage
: llvm::Function::ExternalLinkage;
@@ -475,16 +484,16 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
/// except under the fragile ObjC ABI with only ObjC exceptions
/// enabled. This means, for example, that C with -fexceptions
/// enables this.
-static bool hasUnwindExceptions(const LangOptions &Features) {
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If exceptions are completely disabled, obviously this is false.
- if (!Features.Exceptions) return false;
+ if (!LangOpts.Exceptions) return false;
// If C++ exceptions are enabled, this is true.
- if (Features.CXXExceptions) return true;
+ if (LangOpts.CXXExceptions) return true;
// If ObjC exceptions are enabled, this depends on the ABI.
- if (Features.ObjCExceptions) {
- if (!Features.ObjCNonFragileABI) return false;
+ if (LangOpts.ObjCExceptions) {
+ if (!LangOpts.ObjCNonFragileABI) return false;
}
return true;
@@ -495,7 +504,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (CodeGenOpts.UnwindTables)
F->setHasUWTable();
- if (!hasUnwindExceptions(Features))
+ if (!hasUnwindExceptions(LangOpts))
F->addFnAttr(llvm::Attribute::NoUnwind);
if (D->hasAttr<NakedAttr>()) {
@@ -515,11 +524,18 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
- if (Features.getStackProtector() == LangOptions::SSPOn)
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
F->addFnAttr(llvm::Attribute::StackProtect);
- else if (Features.getStackProtector() == LangOptions::SSPReq)
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
F->addFnAttr(llvm::Attribute::StackProtectReq);
+ if (LangOpts.AddressSanitizer) {
+ // When AddressSanitizer is enabled, set the AddressSafety attribute
+ // unless __attribute__((no_address_safety_analysis)) is used.
+ if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
+ F->addFnAttr(llvm::Attribute::AddressSafety);
+ }
+
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
F->setAlignment(alignment);
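For reference, the opt-out mentioned in the comment above looks like this in
user code (a sketch; the attribute spelling is the one this patch checks via
NoAddressSafetyAnalysisAttr):

    // Deliberately touches memory ASan would flag; exempt it from checks.
    __attribute__((no_address_safety_analysis))
    int read_probe(volatile int *p) {
      return *p;  // no AddressSafety attribute, so no instrumentation
    }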
@@ -569,7 +585,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
- SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(GD), F);
+ SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
// Only a few attributes are set on declarations; these may later be
// overridden by a definition.
@@ -605,20 +621,18 @@ void CodeGenModule::EmitLLVMUsed() {
if (LLVMUsed.empty())
return;
- llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
-
// Convert LLVMUsed to what ConstantArray needs.
- std::vector<llvm::Constant*> UsedArray;
+ SmallVector<llvm::Constant*, 8> UsedArray;
UsedArray.resize(LLVMUsed.size());
for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
- i8PTy);
+ Int8PtrTy);
}
if (UsedArray.empty())
return;
- llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), ATy, false,
@@ -689,7 +703,7 @@ llvm::Constant *CodeGenModule::EmitAnnotationString(llvm::StringRef Str) {
return i->second;
// Not found yet, create a new global.
- llvm::Constant *s = llvm::ConstantArray::get(getLLVMContext(), Str, true);
+ llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(), s->getType(),
true, llvm::GlobalValue::PrivateLinkage, s, ".str");
gv->setSection(AnnotationSection);
@@ -744,7 +758,7 @@ void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
- if (Features.EmitAllDecls)
+ if (LangOpts.EmitAllDecls)
return false;
return !getContext().DeclMustBeEmitted(Global);
@@ -788,7 +802,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return EmitAliasDefinition(GD);
// If this is CUDA, be selective about which declarations we emit.
- if (Features.CUDA) {
+ if (LangOpts.CUDA) {
if (CodeGenOpts.CUDAIsDevice) {
if (!Global->hasAttr<CUDADeviceAttr>() &&
!Global->hasAttr<CUDAGlobalAttr>() &&
@@ -815,10 +829,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
FD->getBody(InlineDefinition);
StringRef MangledName = getMangledName(GD);
- llvm::StringMap<GlobalDecl>::iterator DDI =
- DeferredDecls.find(MangledName);
- if (DDI != DeferredDecls.end())
- DeferredDecls.erase(DDI);
+ DeferredDecls.erase(MangledName);
EmitGlobalDefinition(InlineDefinition);
return;
}
@@ -840,7 +851,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
- if (getLangOptions().CPlusPlus && isa<VarDecl>(Global) &&
+ if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
cast<VarDecl>(Global)->hasInit()) {
DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
CXXGlobalInits.push_back(0);
@@ -863,20 +874,28 @@ namespace {
struct FunctionIsDirectlyRecursive :
public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
const StringRef Name;
+ const Builtin::Context &BI;
bool Result;
- FunctionIsDirectlyRecursive(const FunctionDecl *F) :
- Name(F->getName()), Result(false) {
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
+ Name(N), BI(C), Result(false) {
}
typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
bool TraverseCallExpr(CallExpr *E) {
- const Decl *D = E->getCalleeDecl();
- if (!D)
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (!FD)
return true;
- AsmLabelAttr *Attr = D->getAttr<AsmLabelAttr>();
- if (!Attr)
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel()) {
+ Result = true;
+ return false;
+ }
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (!BuiltinID)
return true;
- if (Name == Attr->getLabel()) {
+ StringRef BuiltinName = BI.GetName(BuiltinID);
+ if (BuiltinName.startswith("__builtin_") &&
+ Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
Result = true;
return false;
}
@@ -885,15 +904,24 @@ namespace {
};
}
-// isTriviallyRecursiveViaAsm - Check if this function calls another
-// decl that, because of the asm attribute, ends up pointing to itself.
+// isTriviallyRecursive - Check whether this function calls another
+// decl that, because of an asm label or because the other decl is a
+// builtin, ends up resolving back to this same function.
bool
-CodeGenModule::isTriviallyRecursiveViaAsm(const FunctionDecl *F) {
- if (getCXXABI().getMangleContext().shouldMangleDeclName(F))
- return false;
+CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
+ StringRef Name;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
+ // asm labels are a special kind of mangling we have to support.
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return false;
+ Name = Attr->getLabel();
+ } else {
+ Name = FD->getName();
+ }
- FunctionIsDirectlyRecursive Walker(F);
- Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
+ FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
+ Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
return Walker.Result;
}
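The self-reference the walker detects arises from code like the following
sketch, modeled on the glibc btowc case cited below (names illustrative);
the builtin case is analogous, e.g. a function foo whose body calls
__builtin_foo:

    // The asm label makes the callee resolve to this very symbol, so
    // emitting the body would yield a self-call, not an implementation.
    extern int btowc_impl(int) __asm__("btowc");
    int btowc(int c) { return btowc_impl(c); }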
@@ -909,7 +937,7 @@ CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
// but a function that calls itself is clearly not equivalent to the real
// implementation.
// This happens in glibc's btowc and in some configure checks.
- return !isTriviallyRecursiveViaAsm(F);
+ return !isTriviallyRecursive(F);
}
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
@@ -1023,7 +1051,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
//
// We also don't emit a definition for a function if it's going to be an entry
// in a vtable, unless it's already marked as used.
- } else if (getLangOptions().CPlusPlus && D.getDecl()) {
+ } else if (getLangOpts().CPlusPlus && D.getDecl()) {
// Look for a declaration that's lexically in a record.
const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
do {
@@ -1037,7 +1065,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
break;
}
}
- FD = FD->getPreviousDeclaration();
+ FD = FD->getPreviousDecl();
} while (FD);
}
@@ -1075,19 +1103,23 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
ExtraAttrs);
}
-static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
- bool ConstantInit) {
- if (!D->getType().isConstant(Context) && !D->getType()->isReferenceType())
+/// isTypeConstant - Determine whether an object of this type can be emitted
+/// as a constant.
+///
+/// If ExcludeCtor is true, the period during which the object's constructor
+/// runs is not considered. The caller must then verify that the object is
+/// not written to during its construction.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+ if (!Ty.isConstant(Context) && !Ty->isReferenceType())
return false;
-
- if (Context.getLangOptions().CPlusPlus) {
- if (const RecordType *Record
- = Context.getBaseElementType(D->getType())->getAs<RecordType>())
- return ConstantInit &&
- cast<CXXRecordDecl>(Record->getDecl())->isPOD() &&
- !cast<CXXRecordDecl>(Record->getDecl())->hasMutableFields();
+
+ if (Context.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *Record
+ = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
+ return ExcludeCtor && !Record->hasMutableFields() &&
+ Record->hasTrivialDestructor();
}
-
+
return true;
}
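In C++ terms, the predicate now classifies const-qualified objects roughly as
follows (illustrative types; recall that Ty must already satisfy isConstant
or be a reference):

    struct A { A(); int x; };     // const A: constant if ExcludeCtor is true
    struct B { mutable int x; };  // const B: rejected, hasMutableFields()
    struct C { ~C(); };           // const C: rejected, non-trivial destructor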
@@ -1144,7 +1176,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
if (D) {
// FIXME: This code is overly simple and should be merged with other global
// handling.
- GV->setConstant(DeclIsConstantGlobal(Context, D, false));
+ GV->setConstant(isTypeConstant(D->getType(), false));
// Set linkage and visibility in case we never see a definition.
NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
@@ -1280,19 +1312,19 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::AvailableExternallyLinkage;
if (KeyFunction->isInlined())
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
case TSK_ExplicitInstantiationDefinition:
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::WeakODRLinkage :
llvm::Function::InternalLinkage;
@@ -1300,13 +1332,13 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
}
}
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return llvm::Function::InternalLinkage;
switch (RD->getTemplateSpecializationKind()) {
@@ -1322,9 +1354,8 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
case TSK_ExplicitInstantiationDefinition:
return llvm::GlobalVariable::WeakODRLinkage;
}
-
- // Silence GCC warning.
- return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
@@ -1332,13 +1363,134 @@ CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
TheTargetData.getTypeStoreSizeInBits(Ty));
}
+llvm::Constant *
+CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *rawInit) {
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups;
+ if (const ExprWithCleanups *withCleanups =
+ dyn_cast<ExprWithCleanups>(rawInit)) {
+ cleanups = withCleanups->getObjects();
+ rawInit = withCleanups->getSubExpr();
+ }
+
+ const InitListExpr *init = dyn_cast<InitListExpr>(rawInit);
+ if (!init || !init->initializesStdInitializerList() ||
+ init->getNumInits() == 0)
+ return 0;
+
+ ASTContext &ctx = getContext();
+ unsigned numInits = init->getNumInits();
+ // FIXME: This check is here because we would otherwise silently miscompile
+ // nested global std::initializer_lists. Better would be to have a real
+ // implementation.
+ for (unsigned i = 0; i < numInits; ++i) {
+ const InitListExpr *inner = dyn_cast<InitListExpr>(init->getInit(i));
+ if (inner && inner->initializesStdInitializerList()) {
+ ErrorUnsupported(inner, "nested global std::initializer_list");
+ return 0;
+ }
+ }
+
+ // Synthesize a fake VarDecl for the array and initialize that.
+ QualType elementType = init->getInit(0)->getType();
+ llvm::APInt numElements(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType arrayType = ctx.getConstantArrayType(elementType, numElements,
+ ArrayType::Normal, 0);
+
+ IdentifierInfo *name = &ctx.Idents.get(D->getNameAsString() + "__initlist");
+ TypeSourceInfo *sourceInfo = ctx.getTrivialTypeSourceInfo(
+ arrayType, D->getLocation());
+ VarDecl *backingArray = VarDecl::Create(ctx, const_cast<DeclContext*>(
+ D->getDeclContext()),
+ D->getLocStart(), D->getLocation(),
+ name, arrayType, sourceInfo,
+ SC_Static, SC_Static);
+
+ // Now clone the InitListExpr to initialize the array instead.
+ // Incredible hack: we want to use the existing InitListExpr here, so we need
+ // to tell it that it no longer initializes a std::initializer_list.
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
+ const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits(),
+ init->getRBraceLoc());
+ arrayInit->setType(arrayType);
+
+ if (!cleanups.empty())
+ arrayInit = ExprWithCleanups::Create(ctx, arrayInit, cleanups);
+
+ backingArray->setInit(arrayInit);
+
+ // Emit the definition of the array.
+ EmitGlobalVarDefinition(backingArray);
+
+ // Inspect the initializer list to validate it and determine its type.
+ // FIXME: doing this every time is probably inefficient; caching would be nice
+ RecordDecl *record = init->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ QualType elementPtr = ctx.getPointerType(elementType.withConst());
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ ++field;
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ bool isStartEnd = false;
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ isStartEnd = true;
+ } else if (!ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+
+ // Now build an APValue representing the std::initializer_list.
+ APValue initListValue(APValue::UninitStruct(), 0, 2);
+ APValue &startField = initListValue.getStructField(0);
+ APValue::LValuePathEntry startOffsetPathEntry;
+ startOffsetPathEntry.ArrayIndex = 0;
+ startField = APValue(APValue::LValueBase(backingArray),
+ CharUnits::fromQuantity(0),
+ llvm::makeArrayRef(startOffsetPathEntry),
+ /*IsOnePastTheEnd=*/false, 0);
+
+ if (isStartEnd) {
+ APValue &endField = initListValue.getStructField(1);
+ APValue::LValuePathEntry endOffsetPathEntry;
+ endOffsetPathEntry.ArrayIndex = numInits;
+ endField = APValue(APValue::LValueBase(backingArray),
+ ctx.getTypeSizeInChars(elementType) * numInits,
+ llvm::makeArrayRef(endOffsetPathEntry),
+ /*IsOnePastTheEnd=*/true, 0);
+ } else {
+ APValue &sizeField = initListValue.getStructField(1);
+ sizeField = APValue(llvm::APSInt(numElements));
+ }
+
+ // Emit the constant for the initializer_list.
+ llvm::Constant *llvmInit =
+ EmitConstantValueForMemory(initListValue, D->getType());
+ assert(llvmInit && "failed to initialize as constant");
+ return llvmInit;
+}
+
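The source pattern this routine recognizes is a file-scope initializer list;
a minimal sketch (variable name illustrative):

    #include <initializer_list>
    // Lowered as a hidden constant array "il__initlist" holding {1, 2, 3},
    // plus a {start, end} or {start, size} struct pointing into it, per the
    // field inspection above.
    std::initializer_list<int> il = {1, 2, 3};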
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
- bool NonConstInit = false;
+ CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ bool NeedsGlobalCtor = false;
+ bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
+
+ const VarDecl *InitDecl;
+ const Expr *InitExpr = D->getAnyInitializer(InitDecl);
- const Expr *InitExpr = D->getAnyInitializer();
-
if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
@@ -1352,23 +1504,32 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
Init = EmitNullConstant(D->getType());
} else {
- Init = EmitConstantExpr(InitExpr, D->getType());
+ // If this is a std::initializer_list, emit the special initializer.
+ Init = MaybeEmitGlobalStdInitializerListInitializer(D, InitExpr);
+ // An empty init list will perform zero-initialization, which happens
+ // to be exactly what we want.
+ // FIXME: It does so in a global constructor, which is *not* what we
+ // want.
+
+ if (!Init)
+ Init = EmitConstantInit(*InitDecl);
if (!Init) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
T = D->getType();
-
- if (getLangOptions().CPlusPlus) {
+
+ if (getLangOpts().CPlusPlus) {
Init = EmitNullConstant(T);
- NonConstInit = true;
+ NeedsGlobalCtor = true;
} else {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
}
} else {
// We don't need an initializer, so remove the entry for the delayed
- // initializer position (just in case this entry was delayed).
- if (getLangOptions().CPlusPlus)
+ // initializer position (just in case this entry was delayed) if we
+ // also don't need to register a destructor.
+ if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
DelayedCXXInitPosition.erase(D);
}
}
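The old NonConstInit flag is split in two because the cases differ; a sketch:

    struct D { ~D(); };
    D d;            // NeedsGlobalDtor: zero-init suffices, but a destructor
                    // must still be registered, so the init function stays.
    struct K { K(); };
    K k;            // NeedsGlobalCtor: emitted as a null constant, then
                    // initialized by a global constructor.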
@@ -1422,12 +1583,11 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setInitializer(Init);
// If it is safe to mark the global 'constant', do so now.
- GV->setConstant(false);
- if (!NonConstInit && DeclIsConstantGlobal(Context, D, true))
- GV->setConstant(true);
+ GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+ isTypeConstant(D->getType(), true));
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
-
+
// Set the llvm linkage type as appropriate.
llvm::GlobalValue::LinkageTypes Linkage =
GetLLVMLinkageVarDefinition(D, GV);
@@ -1439,8 +1599,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
SetCommonAttributes(D, GV);
// Emit the initializer function if necessary.
- if (NonConstInit)
- EmitCXXGlobalVarDeclInitFunc(D, GV);
+ if (NeedsGlobalCtor || NeedsGlobalDtor)
+ EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -1465,7 +1625,7 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
} else if (Linkage == GVA_TemplateInstantiation ||
Linkage == GVA_ExplicitTemplateInstantiation)
return llvm::GlobalVariable::WeakODRLinkage;
- else if (!getLangOptions().CPlusPlus &&
+ else if (!getLangOpts().CPlusPlus &&
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
@@ -1565,16 +1725,20 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
}
}
+void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
+ GetAddrOfGlobalVar(VD);
+}
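The situation this hook covers looks like the following sketch:

    template <typename T> struct S { static T member; };
    template <typename T> T S<T>::member;
    // Explicit instantiation definition: the static data member must be
    // emitted even if this translation unit never references it.
    template int S<int>::member;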
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
- const CGFunctionInfo &FI = getTypes().getFunctionInfo(GD);
- bool variadic = false;
- if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
- variadic = fpt->isVariadic();
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1739,8 +1903,10 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
return Map.GetOrCreateValue(String);
}
- // Otherwise, convert the UTF8 literals into a byte string.
- SmallVector<UTF16, 128> ToBuf(NumBytes);
+ // Otherwise, convert the UTF-8 literal into a string of 16-bit code units.
+ IsUTF16 = true;
+
+ SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for the terminating null.
const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
@@ -1751,38 +1917,20 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
- // Render the UTF-16 string into a byte array and convert to the target byte
- // order.
- //
- // FIXME: This isn't something we should need to do here.
- llvm::SmallString<128> AsBytes;
- AsBytes.reserve(StringLength * 2);
- for (unsigned i = 0; i != StringLength; ++i) {
- unsigned short Val = ToBuf[i];
- if (TargetIsLSB) {
- AsBytes.push_back(Val & 0xFF);
- AsBytes.push_back(Val >> 8);
- } else {
- AsBytes.push_back(Val >> 8);
- AsBytes.push_back(Val & 0xFF);
- }
- }
- // Append one extra null character, the second is automatically added by our
- // caller.
- AsBytes.push_back(0);
-
- IsUTF16 = true;
- return Map.GetOrCreateValue(StringRef(AsBytes.data(), AsBytes.size()));
+ // Add an explicit null.
+ *ToPtr = 0;
+ return Map.GetOrCreateValue(
+     StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+               (StringLength + 1) * 2));
}
static llvm::StringMapEntry<llvm::Constant*> &
GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
- const StringLiteral *Literal,
- unsigned &StringLength)
-{
- StringRef String = Literal->getString();
- StringLength = String.size();
- return Map.GetOrCreateValue(String);
+ const StringLiteral *Literal,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return Map.GetOrCreateValue(String);
}
llvm::Constant *
@@ -1797,8 +1945,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
if (llvm::Constant *C = Entry.getValue())
return C;
- llvm::Constant *Zero =
- llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get __CFConstantStringClassReference.
@@ -1817,7 +1964,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(CFTy));
- std::vector<llvm::Constant*> Fields(4);
+ llvm::Constant *Fields[4];
// Class pointer.
Fields[0] = CFConstantStringClassRef;
@@ -1828,27 +1975,31 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::ConstantInt::get(Ty, 0x07C8);
// String pointer.
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+ llvm::Constant *C = 0;
+ if (isUTF16) {
+ ArrayRef<uint16_t> Arr =
+ llvm::makeArrayRef<uint16_t>((uint16_t*)Entry.getKey().data(),
+ Entry.getKey().size() / 2);
+ C = llvm::ConstantDataArray::get(VMContext, Arr);
+ } else {
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+ }
llvm::GlobalValue::LinkageTypes Linkage;
- bool isConstant;
- if (isUTF16) {
+ if (isUTF16)
// FIXME: why do utf strings get "_" labels instead of "L" labels?
Linkage = llvm::GlobalValue::InternalLinkage;
- // Note: -fwritable-strings doesn't make unicode CFStrings writable, but
- // does make plain ascii ones writable.
- isConstant = true;
- } else {
+ else
// FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
// when using private linkage. It is not clear if this is a bug in ld
// or a reasonable new restriction.
Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
- isConstant = !Features.WritableStrings;
- }
+ // Note: -fwritable-strings doesn't make the backing store strings of
+ // CFStrings writable. (See <rdar://problem/10657500>)
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
- ".str");
+ new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+ Linkage, C, ".str");
GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
@@ -1857,8 +2008,14 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
}
+
+ // String.
Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ if (isUTF16)
+ // Cast the UTF16 string to the correct type.
+ Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+
// String length.
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
@@ -1879,7 +2036,7 @@ static RecordDecl *
CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
DeclContext *DC, IdentifierInfo *Id) {
SourceLocation Loc;
- if (Ctx.getLangOptions().CPlusPlus)
+ if (Ctx.getLangOpts().CPlusPlus)
return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
else
return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
@@ -1894,16 +2051,15 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
if (llvm::Constant *C = Entry.getValue())
return C;
- llvm::Constant *Zero =
- llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
- std::string StringClass(getLangOptions().ObjCConstantStringClass);
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
- if (Features.ObjCNonFragileABI) {
+ if (LangOpts.ObjCNonFragileABI) {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
@@ -1958,18 +2114,19 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
}
- std::vector<llvm::Constant*> Fields(3);
+ llvm::Constant *Fields[3];
// Class pointer.
Fields[0] = ConstantStringClassRef;
// String pointer.
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
llvm::GlobalValue::LinkageTypes Linkage;
bool isConstant;
Linkage = llvm::GlobalValue::PrivateLinkage;
- isConstant = !Features.WritableStrings;
+ isConstant = !LangOpts.WritableStrings;
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
@@ -1990,7 +2147,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
"_unnamed_nsstring_");
// FIXME. Fix section.
if (const char *Sect =
- Features.ObjCNonFragileABI
+ LangOpts.ObjCNonFragileABI
? getContext().getTargetInfo().getNSStringNonFragileABISection()
: getContext().getTargetInfo().getNSStringSection())
GV->setSection(Sect);
@@ -2034,54 +2191,72 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
return ObjCFastEnumerationStateType;
}
-/// GetStringForStringLiteral - Return the appropriate bytes for a
-/// string literal, properly padded to match the literal type.
-std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
- const ASTContext &Context = getContext();
- const ConstantArrayType *CAT =
- Context.getAsConstantArrayType(E->getType());
- assert(CAT && "String isn't pointer or array!");
-
- // Resize the string to the right size.
- uint64_t RealLen = CAT->getSize().getZExtValue();
+llvm::Constant *
+CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ if (E->getCharByteWidth() == 1) {
+ SmallString<64> Str(E->getString());
- switch (E->getKind()) {
- case StringLiteral::Ascii:
- case StringLiteral::UTF8:
- break;
- case StringLiteral::Wide:
- RealLen *= Context.getTargetInfo().getWCharWidth() / Context.getCharWidth();
- break;
- case StringLiteral::UTF16:
- RealLen *= Context.getTargetInfo().getChar16Width() / Context.getCharWidth();
- break;
- case StringLiteral::UTF32:
- RealLen *= Context.getTargetInfo().getChar32Width() / Context.getCharWidth();
- break;
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
- std::string Str = E->getString().str();
- Str.resize(RealLen, '\0');
+ // Wide strings have either 2-byte or 4-byte elements.
+ if (ElemTy->getPrimitiveSizeInBits() == 16) {
+ SmallVector<uint16_t, 32> Elements;
+ Elements.reserve(NumElements);
- return Str;
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+ }
+
+ assert(ElemTy->getPrimitiveSizeInBits() == 32);
+ SmallVector<uint32_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
}
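A wide literal now becomes a ConstantDataArray of 16- or 32-bit elements
rather than a pre-swizzled byte array; a sketch of the input and the expected
IR (IR shown as a comment, illustrative):

    const char16_t *s = u"hi";
    // @.str = private unnamed_addr constant [3 x i16] [i16 104, i16 105, i16 0]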
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
llvm::Constant *
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
- // FIXME: This can be more efficient.
- // FIXME: We shouldn't need to bitcast the constant in the wide string case.
CharUnits Align = getContext().getTypeAlignInChars(S->getType());
- llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S),
- /* GlobalName */ 0,
- Align.getQuantity());
- if (S->isWide() || S->isUTF16() || S->isUTF32()) {
- llvm::Type *DestTy =
- llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
- C = llvm::ConstantExpr::getBitCast(C, DestTy);
+ if (S->isAscii() || S->isUTF8()) {
+ SmallString<64> Str(S->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return GetAddrOfConstantString(Str, /*GlobalName*/ 0, Align.getQuantity());
}
- return C;
+
+ // FIXME: the following does not memoize wide strings.
+ llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(),
+ !LangOpts.WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C, ".str");
+
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(true);
+ return GV;
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -2103,7 +2278,7 @@ static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
unsigned Alignment) {
// Create Constant for this string literal. Don't add a '\0'.
llvm::Constant *C =
- llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), str, false);
// Create a global variable for this string
llvm::GlobalVariable *GV =
@@ -2126,14 +2301,12 @@ static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
const char *GlobalName,
unsigned Alignment) {
- bool IsConstant = !Features.WritableStrings;
-
// Get the default prefix if a name wasn't specified.
if (!GlobalName)
GlobalName = ".str";
// Don't share any string literals if strings aren't constant.
- if (!IsConstant)
+ if (LangOpts.WritableStrings)
return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
@@ -2147,7 +2320,8 @@ llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
}
// Create a global variable for this.
- llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName, Alignment);
+ llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName,
+ Alignment);
Entry.setValue(GV);
return GV;
}
@@ -2308,6 +2482,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::TypeAliasTemplate:
case Decl::NamespaceAlias:
case Decl::Block:
+ case Decl::Import:
break;
case Decl::CXXConstructor:
// Skip function templates
@@ -2330,8 +2505,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Objective-C Decls
// Forward declarations, no (immediate) code generation.
- case Decl::ObjCClass:
- case Decl::ObjCForwardProtocol:
case Decl::ObjCInterface:
break;
@@ -2342,10 +2515,13 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
- case Decl::ObjCProtocol:
- ObjCRuntime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ case Decl::ObjCProtocol: {
+ ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
+ if (Proto->isThisDeclarationADefinition())
+ ObjCRuntime->GenerateProtocol(Proto);
break;
-
+ }
+
case Decl::ObjCCategoryImpl:
// Categories have properties but don't support synthesize so we
// can ignore them here.
@@ -2354,11 +2530,16 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
- if (Features.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
ObjCRuntime->GenerateClass(OMD);
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->getOrCreateInterfaceType(
+     getContext().getObjCInterfaceType(OMD->getClassInterface()),
+     OMD->getLocation());
+ OMD->getLocation());
+
break;
}
case Decl::ObjCMethod: {
@@ -2369,7 +2550,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
case Decl::ObjCCompatibleAlias:
- // compatibility-alias is a directive and has no code gen.
+ ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
break;
case Decl::LinkageSpec:
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index dbc6a87bd19b..38f5008d89a8 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -26,7 +26,6 @@
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ValueHandle.h"
@@ -56,6 +55,7 @@ namespace clang {
class Decl;
class Expr;
class Stmt;
+ class InitListExpr;
class StringLiteral;
class NamedDecl;
class ValueDecl;
@@ -103,8 +103,10 @@ namespace CodeGen {
/// void
llvm::Type *VoidTy;
- /// i8, i32, and i64
- llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
/// int
llvm::IntegerType *IntTy;
@@ -213,7 +215,7 @@ class CodeGenModule : public CodeGenTypeCache {
typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
ASTContext &Context;
- const LangOptions &Features;
+ const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
const llvm::TargetData &TheTargetData;
@@ -232,6 +234,7 @@ class CodeGenModule : public CodeGenTypeCache {
CGCUDARuntime* CUDARuntime;
CGDebugInfo* DebugInfo;
ARCEntrypoints *ARCData;
+ llvm::MDNode *NoObjCARCExceptionsMetadata;
RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
@@ -276,7 +279,11 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
- llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
+
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
@@ -324,7 +331,7 @@ class CodeGenModule : public CodeGenTypeCache {
void createOpenCLRuntime();
void createCUDARuntime();
- bool isTriviallyRecursiveViaAsm(const FunctionDecl *F);
+ bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(const FunctionDecl *F);
llvm::LLVMContext &VMContext;
@@ -382,7 +389,7 @@ public:
CGCXXABI &getCXXABI() { return ABI; }
ARCEntrypoints &getARCEntrypoints() const {
- assert(getLangOptions().ObjCAutoRefCount && ARCData != 0);
+ assert(getLangOpts().ObjCAutoRefCount && ARCData != 0);
return *ARCData;
}
@@ -391,19 +398,51 @@ public:
return *RRData;
}
- llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
- return StaticLocalDeclMap[VD];
+ llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
+ return StaticLocalDeclMap[D];
}
void setStaticLocalDeclAddress(const VarDecl *D,
- llvm::GlobalVariable *GV) {
- StaticLocalDeclMap[D] = GV;
+ llvm::Constant *C) {
+ StaticLocalDeclMap[D] = C;
+ }
+
+ llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
+ return StaticLocalDeclGuardMap[D];
+ }
+ void setStaticLocalDeclGuardAddress(const VarDecl *D,
+ llvm::GlobalVariable *C) {
+ StaticLocalDeclGuardMap[D] = C;
+ }
+
+ llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) {
+ return AtomicSetterHelperFnMap[Ty];
+ }
+ void setAtomicSetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicSetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) {
+ return AtomicGetterHelperFnMap[Ty];
+ }
+ void setAtomicGetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicGetterHelperFnMap[Ty] = Fn;
}
CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+ llvm::MDNode *getNoObjCARCExceptionsMetadata() {
+ if (!NoObjCARCExceptionsMetadata)
+ NoObjCARCExceptionsMetadata =
+ llvm::MDNode::get(getLLVMContext(),
+ SmallVector<llvm::Value*, 1>());
+ return NoObjCARCExceptionsMetadata;
+ }
+
ASTContext &getContext() const { return Context; }
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
- const LangOptions &getLangOptions() const { return Features; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
CodeGenVTables &getVTables() { return VTables; }
@@ -418,6 +457,9 @@ public:
bool shouldUseTBAA() const { return TBAA != 0; }
llvm::MDNode *getTBAAInfo(QualType QTy);
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
static void DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo);
@@ -451,7 +493,6 @@ public:
case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
}
llvm_unreachable("unknown visibility!");
- return llvm::GlobalValue::DefaultVisibility;
}
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
@@ -551,11 +592,6 @@ public:
/// requires no captures.
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
- /// GetStringForStringLiteral - Return the appropriate bytes for a string
- /// literal, properly padded to match the literal type. If only the address of
- /// a constant is needed consider using GetAddrOfConstantStringLiteral.
- std::string GetStringForStringLiteral(const StringLiteral *E);
-
/// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
/// for the given string.
llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
@@ -565,6 +601,10 @@ public:
/// -fconstant-string-class=class_name option.
llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
+ /// GetConstantArrayFromStringLiteral - Return a constant array for the given
+ /// string.
+ llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
+
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
/// for the given string literal.
llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
@@ -596,6 +636,10 @@ public:
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0,
unsigned Alignment=1);
+
+ /// GetAddrOfConstantCompoundLiteral - Returns a pointer to a constant global
+ /// variable for the given file-scope compound literal expression.
+ llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
/// \brief Retrieve the record type that describes the state of an
/// Objective-C fast enumeration loop (for..in).
@@ -624,6 +668,10 @@ public:
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+ /// variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+
/// AddUsedGlobal - Add a global which should be forced to be
/// present in the object file; these are emitted to the llvm.used
/// metadata global.
@@ -661,12 +709,28 @@ public:
llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+ /// EmitConstantInit - Try to emit the initializer for the given declaration
+ /// as a constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF = 0);
+
/// EmitConstantExpr - Try to emit the given expression as a
/// constant; returns 0 if the expression cannot be emitted as a
/// constant.
llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
CodeGenFunction *CGF = 0);
+ /// EmitConstantValue - Emit the given constant value as a constant, in the
+ /// type's scalar representation.
+ llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValueForMemory - Emit the given constant value as a constant,
+ /// in the type's memory representation.
+ llvm::Constant *EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF = 0);
+
/// EmitNullConstant - Return the result of value-initializing the given
/// type, i.e. a null expression of the given type. This is usually,
/// but not always, an LLVM null constant.
@@ -715,10 +779,14 @@ public:
/// as a return type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
- /// ReturnTypeUsesSret - Return true iff the given type uses 'fpret' when used
- /// as a return type.
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when
+ /// used as a return type.
bool ReturnTypeUsesFPRet(QualType ResultType);
+ /// ReturnTypeUsesFP2Ret - Return true iff the given type uses 'fp2ret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFP2Ret(QualType ResultType);
+
/// ConstructAttributeList - Get the LLVM attributes and calling convention to
/// use for a particular function type.
///
@@ -830,6 +898,8 @@ private:
void EmitGlobalFunctionDefinition(GlobalDecl GD);
void EmitGlobalVarDefinition(const VarDecl *D);
+ llvm::Constant *MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *init);
void EmitAliasDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
@@ -864,8 +934,11 @@ private:
/// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
void EmitCXXGlobalDtorFunc();
+ /// EmitCXXGlobalVarDeclInitFunc - Emit the function that initializes the
+ /// specified global (if PerformInit is true) and registers its destructor.
void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
- llvm::GlobalVariable *Addr);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index 887c1eabb0c6..9ee3f1d2e67c 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -169,7 +169,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// TODO: This is using the RTTI name. Is there a better way to get
// a unique string for a type?
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
Out.flush();
@@ -179,3 +179,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// For now, handle any other kind of type conservatively.
return MetadataCache[Ty] = getChar();
}
+
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
+ return getTBAAInfoForNamedType("vtable pointer", getRoot());
+}
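A call site would attach this node when loading a vtable pointer, via the
CodeGenModule wrappers added earlier in this patch (an assumed usage sketch):

    llvm::LoadInst *VTable = Builder.CreateLoad(VTablePtrAddr, "vtable");
    CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());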
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index 9fe51fb33141..8e08498b7e59 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -68,6 +68,10 @@ public:
/// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
/// of the given type.
llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ /// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
+ /// dereference of a vtable pointer.
+ llvm::MDNode *getTBAAInfoForVTablePtr();
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index e0d921896580..41fd536b5cc2 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -15,6 +15,7 @@
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
@@ -26,11 +27,12 @@
using namespace clang;
using namespace CodeGen;
-CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
- const llvm::TargetData &TD, const ABIInfo &Info,
- CGCXXABI &CXXABI, const CodeGenOptions &CGO)
- : Context(Ctx), Target(Ctx.getTargetInfo()), TheModule(M), TheTargetData(TD),
- TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
+CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
+ : Context(CGM.getContext()), Target(Context.getTargetInfo()),
+ TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
+ TheCXXABI(CGM.getCXXABI()),
+ CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
SkippedLayout = false;
}
@@ -48,7 +50,7 @@ CodeGenTypes::~CodeGenTypes() {
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
llvm::StructType *Ty,
StringRef suffix) {
- llvm::SmallString<256> TypeName;
+ SmallString<256> TypeName;
llvm::raw_svector_ostream OS(TypeName);
OS << RD->getKindName() << '.';
@@ -193,11 +195,9 @@ bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
// If this isn't a tagged type, we can convert it!
const TagType *TT = Ty->getAs<TagType>();
if (TT == 0) return true;
-
-
- // If it's a tagged type used by-value, but is just a forward decl, we can't
- // convert it. Note that getDefinition()==0 is not the same as !isDefinition.
- if (TT->getDecl()->getDefinition() == 0)
+
+ // Incomplete types cannot be converted.
+ if (TT->isIncompleteType())
return false;
// If this is an enum, then it is always safe to convert.
@@ -305,7 +305,6 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("Non-canonical or dependent types aren't possible.");
- break;
case Type::Builtin: {
switch (cast<BuiltinType>(Ty)->getKind()) {
@@ -368,12 +367,12 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
break;
- case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
llvm_unreachable("Unexpected placeholder builtin type!");
- break;
}
break;
}
@@ -474,16 +473,13 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
- bool isVariadic;
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- FI = &getFunctionInfo(
+ FI = &arrangeFunctionType(
CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
- isVariadic = FPT->isVariadic();
} else {
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
- FI = &getFunctionInfo(
+ FI = &arrangeFunctionType(
CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
- isVariadic = true;
}
// If there is something higher level prodding our CGFunctionInfo, then
@@ -495,7 +491,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
} else {
// Otherwise, we're good to go, go ahead and convert it.
- ResultType = GetFunctionType(*FI, isVariadic);
+ ResultType = GetFunctionType(*FI);
}
RecordsBeingLaidOut.erase(Ty);
@@ -560,7 +556,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::Atomic: {
- ResultType = ConvertTypeForMem(cast<AtomicType>(Ty)->getValueType());
+ ResultType = ConvertType(cast<AtomicType>(Ty)->getValueType());
break;
}
}
@@ -655,7 +651,7 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
- if (!Context.getLangOptions().CPlusPlus)
+ if (!Context.getLangOpts().CPlusPlus)
return true;
T = Context.getBaseElementType(T);
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 7f0f8ac5f0c5..ba2b3ae54902 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -52,10 +52,13 @@ namespace clang {
namespace CodeGen {
class CGCXXABI;
class CGRecordLayout;
+ class CodeGenModule;
+ class RequiredArgs;
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
class CodeGenTypes {
+ // Some of this stuff should probably be left on the CGM.
ASTContext &Context;
const TargetInfo &Target;
llvm::Module &TheModule;
@@ -63,6 +66,7 @@ class CodeGenTypes {
const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
const CodeGenOptions &CodeGenOpts;
+ CodeGenModule &CGM;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
@@ -101,9 +105,7 @@ private:
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
- CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
- const ABIInfo &Info, CGCXXABI &CXXABI,
- const CodeGenOptions &Opts);
+ CodeGenTypes(CodeGenModule &CGM);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
@@ -124,8 +126,7 @@ public:
llvm::Type *ConvertTypeForMem(QualType T);
/// GetFunctionType - Get the LLVM function type for \arg Info.
- llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic);
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
llvm::FunctionType *GetFunctionType(GlobalDecl GD);
@@ -148,50 +149,66 @@ public:
/// getNullaryFunctionInfo - Get the function info for a void()
/// function with standard CC.
- const CGFunctionInfo &getNullaryFunctionInfo();
-
- /// getFunctionInfo - Get the function info for the specified function decl.
- const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
-
- const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
- const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
- const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
- const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D,
- CXXCtorType Type);
- const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D,
- CXXDtorType Type);
-
- const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
- const FunctionType *Ty) {
- return getFunctionInfo(Ty->getResultType(), Args,
- Ty->getExtInfo());
- }
-
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
-
- /// getFunctionInfo - Get the function info for a member function of
- /// the given type. This is used for calls through member function
- /// pointers.
- const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP);
-
- /// getFunctionInfo - Get the function info for a function described by a
- /// return type and argument types. If the calling convention is not
- /// specified, the "C" calling convention will be used.
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
- const CallArgList &Args,
- const FunctionType::ExtInfo &Info);
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args,
- const FunctionType::ExtInfo &Info);
+ const CGFunctionInfo &arrangeNullaryFunction();
+
+ // The arrangement methods are split into three families:
+ // - those meant to drive the signature and prologue/epilogue
+ // of a function declaration or definition,
+ // - those meant for the computation of the LLVM type for an abstract
+ // appearance of a function, and
+ // - those meant for performing the IR-generation of a call.
+ // They differ mainly in how they deal with optional (i.e. variadic)
+ // arguments, as well as unprototyped functions.
+ //
+ // Key points:
+ // - The CGFunctionInfo for emitting a specific call site must include
+ // entries for the optional arguments.
+ // - The function type used at the call site must reflect the formal
+ // signature of the declaration being called, or else the call will
+ // go awry.
+ // - For the most part, unprototyped functions are called by casting to
+ // a formal signature inferred from the specific argument types used
+ // at the call-site. However, some targets (e.g. x86-64) screw with
+ // this for compatibility reasons.
+
+ const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info,
+ bool isVariadic);
+
+ const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType);
+
+ const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeCXXConstructorDeclaration(
+ const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
/// Retrieves the ABI information for the given function signature.
+ /// This is the "core" routine to which all the others defer.
///
- /// \param ArgTys - must all actually be canonical as params
- const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
- const SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info);
+ /// \param argTypes - must all actually be canonical as params
+ const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs args);
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
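A minimal sketch of how a call site can drive these new entry points, using
only the signatures declared above (the surrounding names CGT, resultTy,
args, and fnType are assumed for illustration):

    // Arrange a call to a function prototyped with one fixed parameter
    // plus varargs: the call-site CGFunctionInfo carries entries for the
    // optional arguments, but only one argument is 'required'.
    const CGFunctionInfo &FI =
        CGT.arrangeFunctionCall(resultTy, args, fnType->getExtInfo(),
                                RequiredArgs(1));
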
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index c3f635aed642..98f67f34dfe1 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -73,15 +73,17 @@ public:
llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src);
-
- llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E);
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset);
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+ llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment);
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -121,7 +123,7 @@ public:
llvm::Value *&AllocPtr, CharUnits &CookieSize);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::GlobalVariable *DeclPtr);
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -216,8 +218,8 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
llvm::IntegerType *ptrdiff = getPtrDiffTy();
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
@@ -312,7 +314,10 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
return Builder.CreateBitCast(Addr, PType);
}
-/// Perform a derived-to-base or base-to-derived member pointer conversion.
+/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
+/// conversion.
+///
+/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
/// <-- offset --> <-- adjustment -->
@@ -335,146 +340,106 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
- llvm::Value *Src) {
+ llvm::Value *src) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
- E->getCastKind() == CK_BaseToDerivedMemberPointer);
-
- if (isa<llvm::Constant>(Src))
- return EmitMemberPointerConversion(cast<llvm::Constant>(Src), E);
-
- CGBuilderTy &Builder = CGF.Builder;
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
- const MemberPointerType *SrcTy =
- E->getSubExpr()->getType()->getAs<MemberPointerType>();
- const MemberPointerType *DestTy = E->getType()->getAs<MemberPointerType>();
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
- const CXXRecordDecl *SrcDecl = SrcTy->getClass()->getAsCXXRecordDecl();
- const CXXRecordDecl *DestDecl = DestTy->getClass()->getAsCXXRecordDecl();
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
- bool DerivedToBase =
- E->getCastKind() == CK_DerivedToBaseMemberPointer;
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
- const CXXRecordDecl *DerivedDecl;
- if (DerivedToBase)
- DerivedDecl = SrcDecl;
- else
- DerivedDecl = DestDecl;
+ CGBuilderTy &Builder = CGF.Builder;
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
- llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- E->path_begin(),
- E->path_end());
- if (!Adj) return Src;
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
// For member data pointers, this is just a matter of adding the
// offset if the source is non-null.
- if (SrcTy->isMemberDataPointer()) {
- llvm::Value *Dst;
- if (DerivedToBase)
- Dst = Builder.CreateNSWSub(Src, Adj, "adj");
+ if (destTy->isMemberDataPointer()) {
+ llvm::Value *dst;
+ if (isDerivedToBase)
+ dst = Builder.CreateNSWSub(src, adj, "adj");
else
- Dst = Builder.CreateNSWAdd(Src, Adj, "adj");
+ dst = Builder.CreateNSWAdd(src, adj, "adj");
// Null check.
- llvm::Value *Null = llvm::Constant::getAllOnesValue(Src->getType());
- llvm::Value *IsNull = Builder.CreateICmpEQ(Src, Null, "memptr.isnull");
- return Builder.CreateSelect(IsNull, Src, Dst);
+ llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
+ llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
+ return Builder.CreateSelect(isNull, src, dst);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (IsARM) {
- uint64_t Offset = cast<llvm::ConstantInt>(Adj)->getZExtValue();
- Offset <<= 1;
- Adj = llvm::ConstantInt::get(Adj->getType(), Offset);
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::Value *SrcAdj = Builder.CreateExtractValue(Src, 1, "src.adj");
- llvm::Value *DstAdj;
- if (DerivedToBase)
- DstAdj = Builder.CreateNSWSub(SrcAdj, Adj, "adj");
+ llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
+ llvm::Value *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
else
- DstAdj = Builder.CreateNSWAdd(SrcAdj, Adj, "adj");
+ dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
- return Builder.CreateInsertValue(Src, DstAdj, 1);
+ return Builder.CreateInsertValue(src, dstAdj, 1);
}
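As a source-level illustration of the data-member-pointer branch above
(hypothetical types; the null value is all-ones, i.e. -1):

    struct B { int x; };
    struct D : B { int y; };
    int B::*pb = &B::x;  // represented as the field offset, or -1 if null
    int D::*pd = pb;     // base-to-derived: NSWAdd of B's offset within D,
                         // guarded by the 'memptr.isnull' select
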
llvm::Constant *
-ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E) {
- const MemberPointerType *SrcTy =
- E->getSubExpr()->getType()->getAs<MemberPointerType>();
- const MemberPointerType *DestTy =
- E->getType()->getAs<MemberPointerType>();
-
- bool DerivedToBase =
- E->getCastKind() == CK_DerivedToBaseMemberPointer;
-
- const CXXRecordDecl *DerivedDecl;
- if (DerivedToBase)
- DerivedDecl = SrcTy->getClass()->getAsCXXRecordDecl();
- else
- DerivedDecl = DestTy->getClass()->getAsCXXRecordDecl();
-
- // Calculate the offset to the base class.
- llvm::Constant *Offset =
- CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- E->path_begin(),
- E->path_end());
- // If there's no offset, we're done.
- if (!Offset) return C;
-
- // If the source is a member data pointer, we have to do a null
- // check and then add the offset. In the common case, we can fold
- // away the offset.
- if (SrcTy->isMemberDataPointer()) {
- assert(C->getType() == getPtrDiffTy());
-
- // If it's a constant int, just create a new constant int.
- if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
- int64_t Src = CI->getSExtValue();
-
- // Null converts to null.
- if (Src == -1) return CI;
-
- // Otherwise, just add the offset.
- int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
- int64_t Dst = (DerivedToBase ? Src - OffsetV : Src + OffsetV);
- return llvm::ConstantInt::get(CI->getType(), Dst, /*signed*/ true);
- }
+ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
- // Otherwise, we have to form a constant select expression.
- llvm::Constant *Null = llvm::Constant::getAllOnesValue(C->getType());
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
- llvm::Constant *IsNull =
- llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, C, Null);
+ // If the adjustment is trivial, we don't need to do anything.
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
- llvm::Constant *Dst;
- if (DerivedToBase)
- Dst = llvm::ConstantExpr::getNSWSub(C, Offset);
- else
- Dst = llvm::ConstantExpr::getNSWAdd(C, Offset);
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ // null maps to null.
+ if (src->isAllOnesValue()) return src;
- return llvm::ConstantExpr::getSelect(IsNull, Null, Dst);
+ if (isDerivedToBase)
+ return llvm::ConstantExpr::getNSWSub(src, adj);
+ else
+ return llvm::ConstantExpr::getNSWAdd(src, adj);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (IsARM) {
- int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
- OffsetV <<= 1;
- Offset = llvm::ConstantInt::get(Offset->getType(), OffsetV);
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
-
- llvm::Constant *Values[2] = { CS->getOperand(0), 0 };
- if (DerivedToBase)
- Values[1] = llvm::ConstantExpr::getSub(CS->getOperand(1), Offset);
+ llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
else
- Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
-
- return llvm::ConstantStruct::get(CS->getType(), Values);
-}
+ dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
+ return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+}
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
@@ -500,6 +465,11 @@ ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return BuildMemberPointer(MD, CharUnits::Zero());
+}
+
+llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment) {
assert(MD->isInstance() && "Member function must not be static!");
MD = MD->getCanonicalDecl();
@@ -524,14 +494,16 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
// discrimination as the least significant bit of ptr does for
// Itanium.
MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ 2 * ThisAdjustment.getQuantity() + 1);
} else {
// Itanium C++ ABI 2.3:
// For a virtual function, [the pointer field] is 1 plus the
// virtual table offset (in bytes) of the function,
// represented as a ptrdiff_t.
MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ ThisAdjustment.getQuantity());
}
} else {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
@@ -539,8 +511,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
- Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
- FPT->isVariadic());
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
@@ -549,12 +520,45 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, (IsARM ? 2 : 1) *
+ ThisAdjustment.getQuantity());
}
return llvm::ConstantStruct::getAnon(MemPtr);
}
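To summarize the encoding this function builds: the member pointer is a pair
of ptrdiff_t values, and the virtual/non-virtual discrimination differs
between the generic Itanium ABI and ARM, exactly as the branches above show:

    // memptr = { ptr, adj }
    // Generic: non-virtual  ptr = function address   adj = thisAdjustment
    //          virtual      ptr = vtable offset + 1  adj = thisAdjustment
    // ARM:     non-virtual  ptr = function address   adj = 2*thisAdjustment
    //          virtual      ptr = vtable offset      adj = 2*thisAdjustment + 1
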
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(MPT);
+
+ // Compute the this-adjustment.
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
+ return BuildMemberPointer(MD, ThisAdjustment);
+
+ CharUnits FieldOffset =
+ getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
+ return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
+}
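A small example of the path walk above (hypothetical hierarchy):

    struct A { int x; };
    struct B : A { };
    int B::*p = &A::x;  // APValue names A::x with path {B}: ThisAdjustment
                        // accumulates A's offset within B before the field
                        // offset is added by EmitMemberDataPointer
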
+
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
@@ -775,7 +779,7 @@ void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
/// Initialize the return slot to 'this' at the start of the
/// function.
if (HasThisReturn(CGF.CurGD))
- CGF.Builder.CreateStore(CGF.LoadCXXThis(), CGF.ReturnValue);
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
@@ -801,7 +805,7 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
// Automatic Reference Counting:
// We need an array cookie for pointers with strong or weak lifetime.
QualType AllocatedType = expr->getAllocatedType();
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
AllocatedType->isObjCLifetimeType()) {
switch (AllocatedType.getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -1020,27 +1024,28 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
+ llvm::Attribute::NoUnwind);
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
+ llvm::Attribute::NoUnwind);
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
+ llvm::Attribute::NoUnwind);
}
namespace {
@@ -1059,43 +1064,53 @@ namespace {
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
const VarDecl &D,
- llvm::GlobalVariable *GV) {
+ llvm::GlobalVariable *var,
+ bool shouldPerformInit) {
CGBuilderTy &Builder = CGF.Builder;
// We only need to use thread-safe statics for local variables;
// global initialization is always single-threaded.
bool threadsafe =
- (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
-
- llvm::IntegerType *GuardTy;
+ (getContext().getLangOpts().ThreadsafeStatics && D.isLocalVarDecl());
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
- bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
+ bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
+
+ llvm::IntegerType *guardTy;
if (useInt8GuardVariable) {
- GuardTy = CGF.Int8Ty;
+ guardTy = CGF.Int8Ty;
} else {
// Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
- GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ guardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ }
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+
+ // Create the guard variable if we don't already have it (as we
+ // might if we're double-emitting this function body).
+ llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
+ if (!guard) {
+ // Mangle the name for the guard.
+ SmallString<256> guardName;
+ {
+ llvm::raw_svector_ostream out(guardName);
+ getMangleContext().mangleItaniumGuardVariable(&D, out);
+ out.flush();
+ }
+
+ // Create the guard variable with a zero-initializer.
+ // Just absorb linkage and visibility from the guarded variable.
+ guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
+ false, var->getLinkage(),
+ llvm::ConstantInt::get(guardTy, 0),
+ guardName.str());
+ guard->setVisibility(var->getVisibility());
+
+ CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
- llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
-
- // Create the guard variable.
- llvm::SmallString<256> GuardVName;
- llvm::raw_svector_ostream Out(GuardVName);
- getMangleContext().mangleItaniumGuardVariable(&D, Out);
- Out.flush();
-
- // Just absorb linkage and visibility from the variable.
- llvm::GlobalVariable *GuardVariable =
- new llvm::GlobalVariable(CGM.getModule(), GuardTy,
- false, GV->getLinkage(),
- llvm::ConstantInt::get(GuardTy, 0),
- GuardVName.str());
- GuardVariable->setVisibility(GV->getVisibility());
// Test whether the variable has completed initialization.
- llvm::Value *IsInitialized;
+ llvm::Value *isInitialized;
// ARM C++ ABI 3.2.3.1:
// To support the potential use of initialization guard variables
@@ -1109,9 +1124,9 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// ...
// }
if (IsARM && !useInt8GuardVariable) {
- llvm::Value *V = Builder.CreateLoad(GuardVariable);
+ llvm::Value *V = Builder.CreateLoad(guard);
V = Builder.CreateAnd(V, Builder.getInt32(1));
- IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+ isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
// Itanium C++ ABI 3.3.2:
// The following is pseudo-code showing how these functions can be used:
@@ -1129,9 +1144,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// }
} else {
// Load the first byte of the guard variable.
- llvm::Type *PtrTy = Builder.getInt8PtrTy();
llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy));
+ Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
LI->setAlignment(1);
// Itanium ABI:
@@ -1143,14 +1157,14 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
if (threadsafe)
LI->setAtomic(llvm::Acquire);
- IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
+ isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
}
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
// Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+ Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
CGF.EmitBlock(InitCheckBlock);
@@ -1158,7 +1172,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
if (threadsafe) {
// Call __cxa_guard_acquire.
llvm::Value *V
- = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
+ = Builder.CreateCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
@@ -1166,22 +1180,22 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
InitBlock, EndBlock);
// Call __cxa_guard_abort along the exceptional edge.
- CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
CGF.EmitBlock(InitBlock);
}
// Emit the initializer and add a global destructor if appropriate.
- CGF.EmitCXXGlobalVarDeclInit(D, GV);
+ CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
if (threadsafe) {
// Pop the guard-abort cleanup if we pushed one.
CGF.PopCleanupBlock();
// Call __cxa_guard_release. This cannot throw.
- Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
+ Builder.CreateCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
} else {
- Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
}
CGF.EmitBlock(EndBlock);
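The emitted control flow follows the shape of the ABI's own pseudo-code
quoted above (thread-safe case; on ARM only the low bit of the 32-bit guard
is tested):

    if (*(char *)&guard == 0) {            // acquire load of the first byte
      if (__cxa_guard_acquire(&guard)) {
        // run the initializer; an exception calls __cxa_guard_abort(&guard)
        __cxa_guard_release(&guard);
      }
    }
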
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index e200e7961701..825e0415227d 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -78,6 +78,13 @@ public:
// delete[] p;
// }
// Whereas it prints "104" and "104" if you give A a destructor.
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
+ "in the Microsoft C++ ABI");
+ }
};
}
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 793ee9192e6b..ea2389e66b5a 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -28,12 +28,12 @@ using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
- llvm::OwningPtr<const llvm::TargetData> TD;
+ OwningPtr<const llvm::TargetData> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
protected:
- llvm::OwningPtr<llvm::Module> M;
- llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ OwningPtr<llvm::Module> M;
+ OwningPtr<CodeGen::CodeGenModule> Builder;
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
const CodeGenOptions &CGO, llvm::LLVMContext& C)
@@ -59,10 +59,15 @@ namespace {
*M, *TD, Diags));
}
- virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Builder->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
// Make sure to emit all elements of a Decl.
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
Builder->EmitTopLevelDecl(*I);
+ return true;
}
/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
@@ -74,7 +79,7 @@ namespace {
// In C++, we may have member functions that need to be emitted at this
// point.
- if (Ctx->getLangOptions().CPlusPlus && !D->isDependentContext()) {
+ if (Ctx->getLangOpts().CPlusPlus && !D->isDependentContext()) {
for (DeclContext::decl_iterator M = D->decls_begin(),
MEnd = D->decls_end();
M != MEnd; ++M)
@@ -112,6 +117,8 @@ namespace {
};
}
+void CodeGenerator::anchor() { }
+
CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
const std::string& ModuleName,
const CodeGenOptions &CGO,
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index e1dc8f7ffdbd..3ed1778cc90f 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -98,7 +98,8 @@ unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
return 32;
}
-bool TargetCodeGenInfo::isNoProtoCallVariadic(CallingConv CC) const {
+bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
// The following conventions are known to require this to be false:
// x86_stdcall
// MIPS
@@ -117,10 +118,14 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
QualType FT = FD->getType();
- // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of zero length always count as empty.
if (AllowArrays)
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
FT = AT->getElementType();
+ }
const RecordType *RT = FT->getAs<RecordType>();
if (!RT)
@@ -252,6 +257,11 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
}
}
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return 0;
+
return Found;
}
@@ -287,6 +297,8 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
return false;
+ uint64_t Size = 0;
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
const FieldDecl *FD = *i;
@@ -299,8 +311,14 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
// counts as "basic" is more complicated than what we were doing previously.
if (FD->isBitField())
return false;
+
+ Size += Context.getTypeSize(FD->getType());
}
+  // Make sure there are no holes in the struct.
+ if (Size != Context.getTypeSize(Ty))
+ return false;
+
return true;
}
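Concretely, with typical 32-bit layouts the new hole check behaves like this
(hypothetical structs):

    struct { int a; int b; };  // 32 + 32 == 64 bits: no holes, expandable
    struct { char c; int i; }; // 8 + 32 != 64 bits: interior padding, so
                               // the argument is not expanded
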
@@ -339,8 +357,14 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty))
+ if (isAggregateTypeForABI(Ty)) {
+    // Records with non-trivial destructors/copy constructors should not be
+    // passed by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
return ABIArgInfo::getIndirect(0);
+ }
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -365,8 +389,8 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
-/// UseX86_MMXType - Return true if this is an MMX type that should use the special
-/// x86_mmx type.
+/// UseX86_MMXType - Return true if this is an MMX type that should use the
+/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
// If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
// special x86_mmx type.
@@ -394,12 +418,14 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsMMXDisabled;
+ bool IsWin32FloatStructABI;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
}
- static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+ unsigned callingConvention);
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
@@ -410,11 +436,13 @@ class X86_32ABIInfo : public ABIInfo {
public:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type);
@@ -423,15 +451,16 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m) {}
+ IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool d, bool p, bool m, bool w)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -459,7 +488,8 @@ public:
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) {
+ ASTContext &Context,
+ unsigned callingConvention) {
uint64_t Size = Context.getTypeSize(Ty);
// Type must be register sized.
@@ -484,7 +514,8 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// Arrays are treated like records.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
+ return shouldReturnTypeInRegister(AT->getElementType(), Context,
+ callingConvention);
// Otherwise, it must be a record type.
const RecordType *RT = Ty->getAs<RecordType>();
@@ -492,6 +523,13 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// FIXME: Traverse bases here too.
+ // For thiscall conventions, structures will never be returned in
+  // a register. This is for compatibility with the MSVC ABI.
+ if (callingConvention == llvm::CallingConv::X86_ThisCall &&
+ RT->isStructureType()) {
+ return false;
+ }
+
// Structure types are passed in register if all fields would be
// passed in a register.
for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
@@ -503,14 +541,15 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
continue;
// Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ if (!shouldReturnTypeInRegister(FD->getType(), Context,
+ callingConvention))
return false;
}
-
return true;
}
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
@@ -555,51 +594,24 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
return ABIArgInfo::getIndirect(0);
- // Classify "single element" structs as their element type.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
- if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
- if (BT->isIntegerType()) {
- // We need to use the size of the structure, padding
- // bit-fields can adjust that to be larger than the single
- // element type.
- uint64_t Size = getContext().getTypeSize(RetTy);
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), (unsigned)Size));
- }
-
- if (BT->getKind() == BuiltinType::Float) {
- assert(getContext().getTypeSize(RetTy) ==
- getContext().getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
- }
-
- if (BT->getKind() == BuiltinType::Double) {
- assert(getContext().getTypeSize(RetTy) ==
- getContext().getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
- }
- } else if (SeltTy->isPointerType()) {
- // FIXME: It would be really nice if this could come out as the proper
- // pointer type.
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
- return ABIArgInfo::getDirect(PtrTy);
- } else if (SeltTy->isVectorType()) {
- // 64- and 128-bit vectors are never returned in a
- // register when inside a structure.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size == 64 || Size == 128)
- return ABIArgInfo::getIndirect(0);
-
- return classifyReturnType(QualType(SeltTy, 0));
- }
- }
-
// Small structures which are register sized are generally returned
// in a register.
- if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
+ callingConvention)) {
uint64_t Size = getContext().getTypeSize(RetTy);
+
+    // As a special case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
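For example, under this rule (assuming the usual x86-32 type sizes):

    struct { float f; };  // returned directly as float in an FP register,
                          // unless IsWin32FloatStructABI is set
    struct { char *p; };  // returned as the pointer type itself
    struct { short s; };  // still returned as a register-sized integer (i16)
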
@@ -631,7 +643,7 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
i != e; ++i) {
QualType FT = i->getType();
- if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128)
+ if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
return true;
if (isRecordWithSSEVectorType(Context, FT))
@@ -655,7 +667,7 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (isRecordWithSSEVectorType(getContext(), Ty))
+ if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
return 16;
return MinABIStackAlignInBytes;
@@ -694,8 +706,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
return getIndirectResult(Ty);
}
- // Ignore empty structs.
- if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
// Expand small (<= 128-bit) record types when we know that the stack layout
@@ -743,19 +755,36 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+  // Compute whether the address needs to be aligned.
+ unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
+ Align = getTypeStackAlignInBytes(Ty, Align);
+ Align = std::max(Align, 4U);
+ if (Align > 4) {
+ // addr = (addr + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
+ Addr = CGF.Builder.CreateGEP(Addr, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
+ CGF.Int32Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
+ Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ Addr->getType(),
+ "ap.cur.aligned");
+ }
+
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
@@ -782,10 +811,8 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-7 are the eight integer registers; the order is different
// on Darwin (for EH), but the range is the same.
@@ -796,7 +823,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 12-16 are st(0..4). Not sure why we stop at 4.
// These have size 16, which is sizeof(long double) on
// platforms with 8-byte alignment for that type.
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
} else {
@@ -807,7 +834,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
// platforms with 4-byte alignment for that type.
- llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
}
@@ -897,14 +924,20 @@ class X86_64ABIInfo : public ABIInfo {
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty) const;
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE) const;
+ bool IsIllegalVectorType(QualType Ty) const;
+
/// The 0.98 ABI revision clarified a lot of ambiguities,
/// unfortunately in ways that were not always consistent with
/// certain previous compilers. In particular, platforms which
@@ -914,8 +947,23 @@ class X86_64ABIInfo : public ABIInfo {
return !getContext().getTargetInfo().getTriple().isOSDarwin();
}
+ bool HasAVX;
+
public:
- X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
+ ABIInfo(CGT), HasAVX(hasavx) {}
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return (vectorTy->getBitWidth() > 128);
+ }
+ return false;
+ }
virtual void computeInfo(CGFunctionInfo &FI) const;
@@ -939,8 +987,12 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+
+ const X86_64ABIInfo &getABIInfo() const {
+ return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 7;
@@ -948,16 +1000,11 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
- AssignToArrayRange(Builder, Address, Eight8, 0, 16);
-
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
@@ -967,13 +1014,29 @@ public:
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
- bool isNoProtoCallVariadic(CallingConv CC) const {
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
// registers used, and GCC sets this when calling an unprototyped
- // function, so we override the default behavior.
- if (CC == CC_Default || CC == CC_C) return true;
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
- return TargetCodeGenInfo::isNoProtoCallVariadic(CC);
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
}
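For instance (hypothetical C snippet), a call through an unprototyped
declaration that passes an AVX value now skips the %al convention:

    void f();                       /* unprototyped */
    __m256 v = _mm256_setzero_ps();
    f(v);                           /* AVX argument: use the default path */
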
};
@@ -989,16 +1052,11 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
- AssignToArrayRange(Builder, Address, Eight8, 0, 16);
-
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
};
@@ -1164,7 +1222,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128 || Size == 256) {
+ } else if (Size == 128 || (HasAVX && Size == 256)) {
// Arguments of 256-bits are split into four eightbyte chunks. The
// least significant one belongs to class SSE and all the others to class
// SSEUP. The original Lo and Hi design considers that types can't be
@@ -1377,10 +1435,28 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = HasAVX ? 256 : 128;
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ }
+
+ return false;
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
- if (!isAggregateTypeForABI(Ty)) {
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -1395,6 +1471,38 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+  // did not claim the unused register. That would require either reordering the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
return ABIArgInfo::getIndirect(Align);
}
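For example, once the six integer argument registers are exhausted, a type
such as

    struct { int a, b; };  // size 64 bits; byval alignment raised to 8

is passed directly as an i64 (which the backend places on the stack) rather
than indirectly with byval.
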
@@ -1416,7 +1524,7 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
llvm::Type *EltTy = VT->getElementType();
unsigned BitWidth = VT->getBitWidth();
- if ((BitWidth == 128 || BitWidth == 256) &&
+ if ((BitWidth >= 128 && BitWidth <= 256) &&
(EltTy->isFloatTy() || EltTy->isDoubleTy() ||
EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
@@ -1810,8 +1918,10 @@ classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(ResType);
}
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
- unsigned &neededSSE) const {
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(
+ QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
+ const
+{
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi);
@@ -1843,7 +1953,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case ComplexX87:
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
++neededInt;
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, freeIntRegs);
case SSEUp:
case X87Up:
@@ -1951,7 +2061,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, neededInt, neededSSE);
+ it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
+ neededSSE);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -1961,7 +2072,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
freeIntRegs -= neededInt;
freeSSERegs -= neededSSE;
} else {
- it->info = getIndirectResult(it->type);
+ it->info = getIndirectResult(it->type, freeIntRegs);
}
}
}
@@ -1976,19 +2087,17 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
// byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
if (Align > 8) {
- // Note that we follow the ABI & gcc here, even though the type
- // could in theory have an alignment greater than 16. This case
- // shouldn't ever matter in practice.
-
- // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, 15);
+ llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
CGF.Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
@@ -2019,8 +2128,6 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -2031,7 +2138,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
unsigned neededInt, neededSSE;
Ty = CGF.getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
@@ -2129,7 +2236,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// area, we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
- llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ llvm::Type *DoubleTy = CGF.DoubleTy;
llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
llvm::StructType *ST = llvm::StructType::get(DoubleTy,
@@ -2192,7 +2299,8 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
if (Size == 128 &&
- getContext().getTargetInfo().getTriple().getOS() == llvm::Triple::MinGW32)
+ getContext().getTargetInfo().getTriple().getOS()
+ == llvm::Triple::MinGW32)
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
@@ -2224,8 +2332,7 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -2270,9 +2377,8 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// against gcc output. AFAIK all ABIs use the same encoding.
CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
@@ -2327,8 +2433,9 @@ public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
bool isEABI() const {
- StringRef Env = getContext().getTargetInfo().getTriple().getEnvironmentName();
- return (Env == "gnueabi" || Env == "eabi");
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
}
private:
@@ -2362,15 +2469,10 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-15 are the 16 integer registers.
- AssignToArrayRange(Builder, Address, Four8, 0, 15);
-
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
return false;
}
@@ -2671,6 +2773,14 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (getABIKind() == AAPCS_VFP) {
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(RetTy, Base, getContext()))
+ // Homogeneous Aggregates are returned directly.
+ return ABIArgInfo::getDirect();
+ }
+
// Aggregates <= 4 bytes are returned in r0; other aggregates
// are returned indirectly.
uint64_t Size = getContext().getTypeSize(RetTy);
@@ -2688,12 +2798,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Handle address alignment for type alignment > 32 bits
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
@@ -2773,7 +2882,7 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Calling convention as default by an ABI.
llvm::CallingConv::ID DefaultCC;
- const LangOptions &LangOpts = getContext().getLangOptions();
+ const LangOptions &LangOpts = getContext().getLangOpts();
if (LangOpts.OpenCL || LangOpts.CUDA) {
// If we are in OpenCL or CUDA mode, then default to device functions
DefaultCC = llvm::CallingConv::PTX_Device;
@@ -2793,7 +2902,6 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const {
llvm_unreachable("PTX does not support varargs");
- return 0;
}
void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
@@ -2805,7 +2913,7 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *F = cast<llvm::Function>(GV);
// Perform special handling in OpenCL mode
- if (M.getLangOptions().OpenCL) {
+ if (M.getLangOpts().OpenCL) {
// Use OpenCL function attributes to set proper calling conventions
// By default, all functions are device functions
if (FD->hasAttr<OpenCLKernelAttr>()) {
@@ -2817,7 +2925,7 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
// Perform special handling in CUDA mode.
- if (M.getLangOptions().CUDA) {
+ if (M.getLangOpts().CUDA) {
// CUDA __global__ functions get a kernel calling convention. Since
// __global__ functions cannot be called from the device, we do not
// need to set the noinline attribute.
@@ -2829,85 +2937,6 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
//===----------------------------------------------------------------------===//
-// SystemZ ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class SystemZABIInfo : public ABIInfo {
-public:
- SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- bool isPromotableIntegerType(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
-};
-
-}
-
-bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
- // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- case BuiltinType::Char_S:
- case BuiltinType::Char_U:
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- case BuiltinType::Short:
- case BuiltinType::UShort:
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- return false;
- }
- return false;
-}
-
-llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Implement
- return 0;
-}
-
-
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
- if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
-
- return (isPromotableIntegerType(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty))
- return ABIArgInfo::getIndirect(0);
-
- return (isPromotableIntegerType(Ty) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
-//===----------------------------------------------------------------------===//
// MBlaze ABI Implementation
//===----------------------------------------------------------------------===//
@@ -3063,24 +3092,28 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
- static const unsigned MinABIStackAlignInBytes = 4;
+ bool IsO32;
+ unsigned MinABIStackAlignInBytes;
+ llvm::Type* HandleAggregates(QualType Ty) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
- MipsABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
};
-const unsigned MipsABIInfo::MinABIStackAlignInBytes;
-
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
unsigned SizeOfUnwindException;
public:
- MIPSTargetCodeGenInfo(CodeGenTypes &CGT, unsigned SZ)
- : TargetCodeGenInfo(new MipsABIInfo(CGT)), SizeOfUnwindException(SZ) {}
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 29;
@@ -3095,35 +3128,184 @@ public:
};
}
-ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
+// In N32/64, an aligned double-precision floating-point field is passed in
+// a register.
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
+ if (IsO32)
+ return 0;
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType())
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ uint64_t StructSize = getContext().getTypeSize(Ty);
+ assert(!(StructSize % 8) && "Size of structure must be a multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+ SmallVector<llvm::Type*, 8> ArgList;
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = (*i)->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ // This struct/class doesn't have an aligned double field.
+ if (!LastOffset)
+ return 0;
+
+ // Add ((StructSize - LastOffset) / 64) args of type i64.
+ for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
+ ArgList.push_back(I64);
+
+ // If the size of the remainder is not zero, add one more integer type to
+ // ArgList.
+ unsigned R = (StructSize - LastOffset) % 64;
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
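+
+// Worked example (illustrative, under the N64 rules above): for
+// struct S { int i; double d; }, the double sits at offset 64 and is
+// 64-bit aligned, so HandleAggregates coerces S to { i64, double } and
+// the double travels in a floating-point register. A struct with no
+// aligned double field returns 0 and is passed in integer registers.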
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
+ // Padding is inserted only for N32/64.
+ if (IsO32)
+ return 0;
+
+ assert(Align <= 16 && "Alignment larger than 16 not handled.");
+ return (Align == 16 && Offset & 0xf) ?
+ llvm::IntegerType::get(getVMContext(), 64) : 0;
+}
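+
+// For illustration: a 16-byte-aligned argument (e.g. an N64 long
+// double) whose running offset is 8 mod 16 gets one i64 of padding so
+// that the value itself starts 16-byte aligned; every other case emits
+// no padding type.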
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize =
+ llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+ Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
+ Offset += TySize;
+
if (isAggregateTypeForABI(Ty)) {
// Ignore empty aggregates.
- if (getContext().getTypeSize(Ty) == 0)
+ if (TySize == 0)
return ABIArgInfo::getIgnore();
// Records with non-trivial destructors/constructors should not be passed
// by value.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
+ Offset = OrigOffset + 8;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
- return ABIArgInfo::getIndirect(0);
+ // If we have reached here, aggregates are passed either indirectly via a
+ // byval pointer or directly by coercing to another structure type. In the
+ // latter case, padding is inserted if the offset of the aggregate is
+ // unaligned.
+ llvm::Type *ResType = HandleAggregates(Ty);
+
+ if (!ResType)
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
+}
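+
+// Offset bookkeeping sketch (illustrative): for f(int, double) with a
+// directly-returned result, the int advances Offset from 0 to 8 and the
+// double from 8 to 16. Neither is 16-byte aligned, so getPaddingType
+// returns 0 for both; only a 16-byte aligned argument that lands at an
+// offset of 8 mod 16 is padded.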
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 2> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128 bits.
+ // 2. The struct/class has one or two fields, all of which are
+ // floating-point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType((*b)->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ RTList.push_back(llvm::IntegerType::get(getVMContext(),
+ std::min(Size, (uint64_t)64)));
+ if (Size > 64)
+ RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
+
+ return llvm::StructType::get(getVMContext(), RTList);
}
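// returnAggregateInRegs, for illustration: struct { float x; float y; }
// satisfies all three conditions above and becomes { float, float },
// returned in FP registers, while struct { int a; int b; int c; }
// (96 bits) takes the integer path and comes back as { i64, i32 }.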
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType() || Size == 0)
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy)) {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ }
return ABIArgInfo::getIndirect(0);
}
@@ -3137,29 +3319,36 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
}
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ it->info = classifyArgumentType(it->type, Offset);
}
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped;
+ unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
+ llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
if (TypeAlign > MinABIStackAlignInBytes) {
- llvm::Value *AddrAsInt32 = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- llvm::Value *Inc = llvm::ConstantInt::get(CGF.Int32Ty, TypeAlign - 1);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -TypeAlign);
- llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt32, Inc);
+ llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
+ llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
+ llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
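// (Addr + Align - 1) & -Align rounds Addr up to the next multiple of
// TypeAlign; e.g. Addr == 12 with Align == 8 gives (12 + 7) & ~7 == 16.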
AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
}
@@ -3167,11 +3356,11 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
AddrTyped = Builder.CreateBitCast(Addr, PTy);
llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
- TypeAlign = std::max(TypeAlign, MinABIStackAlignInBytes);
+ TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
llvm::Value *NextAddr =
- Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
@@ -3184,19 +3373,15 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// This information comes from gcc's implementation, which seems to be
// as canonical as it gets.
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
// Everything on MIPS is 4 bytes. Double-precision FP registers
// are aliased to pairs of single-precision FP registers.
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-31 are the general purpose registers, $0 - $31.
// 32-63 are the floating-point registers, $f0 - $f31.
// 64 and 65 are the multiply/divide registers, $hi and $lo.
// 66 is the (notional, I think) register for signal-handler return.
- AssignToArrayRange(Builder, Address, Four8, 0, 65);
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
// 67-74 are the floating-point status registers, $fcc0 - $fcc7.
// They are one bit wide and ignored here.
@@ -3206,8 +3391,7 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
// 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
// 176-181 are the DSP accumulator registers.
- AssignToArrayRange(Builder, Address, Four8, 80, 181);
-
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
return false;
}
@@ -3236,7 +3420,7 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *F = cast<llvm::Function>(GV);
- if (M.getLangOptions().OpenCL) {
+ if (M.getLangOpts().OpenCL) {
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL C Kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
@@ -3251,27 +3435,20 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
SmallVector<llvm::Value*, 5> Operands;
Operands.push_back(F);
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
- FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
// Add a boolean constant operand for "required" (true) or "hint" (false)
// for implementing the work_group_size_hint attr later. Currently
// always true as the hint is not yet implemented.
- Operands.push_back(llvm::ConstantInt::getTrue(llvm::Type::getInt1Ty(Context)));
-
+ Operands.push_back(llvm::ConstantInt::getTrue(Context));
OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
}
}
@@ -3280,6 +3457,147 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public ABIInfo {
+
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 29;
+ }
+};
+
+}
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 64)
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // Pass in the smallest viable integer type.
+ else if (Size > 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ else if (Size > 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ else if (Size > 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ else
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+}
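+
+// Effect, by way of example (illustrative): a 6-byte struct (48 bits)
+// is widened and passed directly as i64; a 12-byte struct goes indirect
+// byval; a plain char is not an aggregate, so it is promoted and
+// extended like any other promotable integer.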
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+}
+
+llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
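+
+// Walk-through (illustrative): each vararg is read at the current "ap"
+// pointer, and "ap" is advanced by the value's size rounded up to a
+// 4-byte slot, so a char consumes 4 bytes and a 6-byte struct consumes
+// 8. Per the FIXME above, types needing more than 4-byte alignment are
+// not yet realigned here.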
+
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
@@ -3291,11 +3609,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 24));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 32));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
case llvm::Triple::arm:
case llvm::Triple::thumb:
@@ -3317,9 +3635,6 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::ptx64:
return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
- case llvm::Triple::systemz:
- return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
-
case llvm::Triple::mblaze:
return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
@@ -3334,7 +3649,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, true, true, DisableMMX, false));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3343,24 +3659,36 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
- case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, false));
+
+ case llvm::Triple::Win32:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, true));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, false, false, DisableMMX, false));
}
}
- case llvm::Triple::x86_64:
+ case llvm::Triple::x86_64: {
+ bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
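+ // "avx" here is the target ABI string (presumably set via -target-abi);
+ // when present, the x86-64 classifier may pass 256-bit vectors directly
+ // in YMM registers instead of in memory.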
+
switch (Triple.getOS()) {
case llvm::Triple::Win32:
case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
default:
- return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
+ HasAVX));
}
}
+ case llvm::Triple::hexagon:
+ return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ }
}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 8f90c7bdd92d..88b4997d48cc 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -30,8 +30,10 @@ namespace clang {
class Decl;
namespace CodeGen {
+ class CallArgList;
class CodeGenModule;
class CodeGenFunction;
+ class CGFunctionInfo;
}
/// TargetCodeGenInfo - This class organizes various target-specific
@@ -160,7 +162,8 @@ namespace clang {
/// same way and some out-of-band information is passed for the
/// benefit of variadic callees, as is the case for x86-64.
/// In this case the ABI should be consulted.
- virtual bool isNoProtoCallVariadic(CallingConv CC) const;
+ virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
+ const FunctionNoProtoType *fnType) const;
};
}