author    Dimitry Andric <dim@FreeBSD.org>  2011-10-20 21:14:49 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2011-10-20 21:14:49 +0000
commit    36981b17ed939300f6f8fc2355a255f711fcef71 (patch)
tree      ee2483e98b09cac943dc93a6969d83ca737ff139  /lib/CodeGen/CGExprAgg.cpp
parent    180abc3db9ae3b4fc63cd65b15697e6ffcc8a657 (diff)
Vendor import of clang release_30 branch r142614 (tag: vendor/clang/clang-r142614)
Notes:
    svn path=/vendor/clang/dist/; revision=226586
    svn path=/vendor/clang/clang-r142614/; revision=226587; tag=vendor/clang/clang-r142614
Diffstat (limited to 'lib/CodeGen/CGExprAgg.cpp')
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 156
1 file changed, 91 insertions(+), 65 deletions(-)
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 915ffd6034e2..97754d5c0ba6 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -35,11 +35,18 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot Dest;
bool IgnoreResult;
+ /// We want to use 'dest' as the return slot except under two
+ /// conditions:
+ /// - The destination slot requires garbage collection, so we
+ /// need to use the GC API.
+ /// - The destination slot is potentially aliased.
+ bool shouldUseDestForReturnSlot() const {
+ return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
+ }
+
ReturnValueSlot getReturnValueSlot() const {
- // If the destination slot requires garbage collection, we can't
- // use the real return value slot, because we have to use the GC
- // API.
- if (Dest.requiresGCollection()) return ReturnValueSlot();
+ if (!shouldUseDestForReturnSlot())
+ return ReturnValueSlot();
return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
}
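
The hunk above folds the old GC-only check into one predicate that also
accounts for aliasing. A minimal standalone sketch of the same decision
logic, assuming a hypothetical Slot type rather than clang's AggValueSlot:

    #include <cassert>

    // Hypothetical, simplified stand-in for clang's AggValueSlot.
    struct Slot {
      bool RequiresGC = false;
      bool PotentiallyAliased = false;
    };

    // Mirrors the patch: the destination may serve as the call's return
    // slot only when neither GC barriers nor potential aliasing interfere.
    static bool shouldUseDestForReturnSlot(const Slot &Dest) {
      return !(Dest.RequiresGC || Dest.PotentiallyAliased);
    }

    int main() {
      assert(shouldUseDestForReturnSlot(Slot{false, false}));
      assert(!shouldUseDestForReturnSlot(Slot{true, false}));  // GC path
      assert(!shouldUseDestForReturnSlot(Slot{false, true}));  // aliased
    }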
@@ -69,7 +76,13 @@ public:
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
- void EmitGCMove(const Expr *E, RValue Src);
+ void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+
+ AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+ if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
+ return AggValueSlot::NeedsGCBarriers;
+ return AggValueSlot::DoesNotNeedGCBarriers;
+ }
bool TypeRequiresGCollection(QualType T);
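
The new needsGC() helper packages the "GC enabled and type requires
collection" test, previously repeated at each call site, into the enum
value the updated AggValueSlot API expects. A rough standalone analogue,
with assumed bool inputs standing in for the CodeGenFunction queries:

    enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };

    // gcEnabled stands in for CGF.getLangOptions().getGC();
    // hasObjectMember stands in for TypeRequiresGCollection(T).
    NeedsGCBarriers_t needsGC(bool gcEnabled, bool hasObjectMember) {
      return (gcEnabled && hasObjectMember) ? NeedsGCBarriers
                                            : DoesNotNeedGCBarriers;
    }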
@@ -141,6 +154,9 @@ public:
void EmitNullInitializationToLValue(LValue Address);
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+ void VisitAtomicExpr(AtomicExpr *E) {
+ CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ }
};
} // end anonymous namespace.
@@ -173,23 +189,27 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
return Record->hasObjectMember();
}
-/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
+/// \brief Perform the final move to DestPtr if for some reason
+/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
/// RValue Result = EmitSomething(..., getReturnValueSlot());
-/// EmitGCMove(E, Result);
-/// If GC doesn't interfere, this will cause the result to be emitted
-/// directly into the return value slot. If GC does interfere, a final
-/// move will be performed.
-void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (Dest.requiresGCollection()) {
- CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
- Src.getAggregateAddr(),
- SizeVal);
+/// EmitMoveFromReturnSlot(E, Result);
+///
+/// If nothing interferes, this will cause the result to be emitted
+/// directly into the return value slot. Otherwise, a final move
+/// will be performed.
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+ if (shouldUseDestForReturnSlot()) {
+ // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
+ // The possibility of undef rvalues complicates that a lot,
+ // though, so we can't really assert.
+ return;
}
+
+ // Otherwise, do a final copy.
+ assert(Dest.getAddr() != Src.getAggregateAddr());
+ EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
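
Together with getReturnValueSlot() above, this gives callers one uniform
pattern: emit into the return slot, then call EmitMoveFromReturnSlot(),
which either does nothing or performs the final copy. A self-contained
sketch of that control flow, using a toy Value type in place of a real
aggregate rvalue:

    #include <cassert>
    #include <cstring>

    struct Value { char Bytes[16]; };  // stand-in for an aggregate rvalue

    // If the callee was handed the destination as its return slot, the
    // result is already in place. Otherwise the two addresses must differ,
    // and a final copy lands the value in *dest.
    void moveFromReturnSlot(Value *dest, const Value *src, bool usedDest) {
      if (usedDest)
        return;  // result already lives in *dest
      assert(dest != src && "distinct slots expected, as in the patch");
      std::memcpy(dest, src, sizeof(Value));
    }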
@@ -215,7 +235,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
if (Dest.requiresGCollection()) {
CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
Dest.getAddr(),
Src.getAggregateAddr(),
SizeVal);
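
The SizeTy line is the first of many identical edits in this patch: the
LLVM 3.0 type-system rewrite made llvm::Type mutable (an identified struct
can gain its body after creation), so types are no longer passed as const
pointers. The mechanical before/after, shown out of context:

    // Pre-3.0 style: types handled through const pointers.
    // const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());

    // LLVM 3.0 style: same call, const qualifier dropped.
    // llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());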
@@ -301,16 +321,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_DerivedToBase:
case CK_BaseToDerived:
case CK_UncheckedDerivedToBase: {
- assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
+ llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
"should have been unpacked before we got here");
- break;
}
case CK_GetObjCProperty: {
LValue LV = CGF.EmitLValue(E->getSubExpr());
assert(LV.isPropertyRef());
RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
break;
}
@@ -348,7 +367,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingRealToComplex:
@@ -361,9 +381,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
- case CK_ObjCProduceObject:
- case CK_ObjCConsumeObject:
- case CK_ObjCReclaimReturnedObject:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -375,12 +396,12 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
}
RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
@@ -426,10 +447,9 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// as it may change the 'forwarding' field via call to Block_copy.
LValue RHS = CGF.EmitLValue(E->getRHS());
LValue LHS = CGF.EmitLValue(E->getLHS());
- bool GCollection = false;
- if (CGF.getContext().getLangOptions().getGCMode())
- GCollection = TypeRequiresGCollection(E->getLHS()->getType());
- Dest = AggValueSlot::forLValue(LHS, true, GCollection);
+ Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
EmitFinalDestCopy(E, RHS, true);
return;
}
@@ -451,13 +471,11 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
} else {
- bool GCollection = false;
- if (CGF.getContext().getLangOptions().getGCMode())
- GCollection = TypeRequiresGCollection(E->getLHS()->getType());
-
// Codegen the RHS so that it stores directly into the LHS.
- AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
- GCollection);
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
EmitFinalDestCopy(E, LHS, true);
}
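
Both assignments above replace positional bools with named enumerators.
The enumerator names below match the patch; the surrounding scaffolding is
an assumed simplification showing why the new signature is safer: each
flag reads at the call site, and a swapped argument no longer type-checks:

    struct AggValueSlot {
      enum IsDestructed_t    { IsNotDestructed, IsDestructed };
      enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
      enum IsAliased_t       { IsNotAliased, IsAliased };

      // The old forLValue(LHS, true, GCollection) told the reader nothing;
      // here each flag is self-documenting and separately typed.
      static AggValueSlot forLValue(/* LValue LV, */ IsDestructed_t D,
                                    NeedsGCBarriers_t G, IsAliased_t A) {
        return AggValueSlot{D, G, A};
      }

      IsDestructed_t Destructed;
      NeedsGCBarriers_t GC;
      IsAliased_t Aliased;
    };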
@@ -476,7 +494,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
// Save whether the destination's lifetime is externally managed.
- bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();
+ bool isExternallyDestructed = Dest.isExternallyDestructed();
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
@@ -489,8 +507,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// If the result of an agg expression is unused, then the emission
// of the LHS might need to create a destination slot. That's fine
// with us, and we can safely emit the RHS into the same slot, but
- // we shouldn't claim that its lifetime is externally managed.
- Dest.setLifetimeExternallyManaged(DestLifetimeManaged);
+ // we shouldn't claim that it's already being destructed.
+ Dest.setExternallyDestructed(isExternallyDestructed);
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
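
The rename also sharpens the intent of this save/restore: emitting the LHS
branch may mark the shared slot as externally destructed, and the flag must
be reset before the RHS branch reuses the slot. In outline, with a
hypothetical one-field stand-in for the slot:

    struct Slot { bool ExternallyDestructed = false; };

    void emitConditional(Slot &Dest /*, cond, lhs, rhs */) {
      bool isExternallyDestructed = Dest.ExternallyDestructed;  // save
      // ... emit the true branch into Dest; it may set the flag ...
      Dest.ExternallyDestructed = isExternallyDestructed;       // restore
      // ... emit the false branch into the very same slot ...
    }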
@@ -518,16 +536,17 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Ensure that we have a slot, but if we already do, remember
- // whether its lifetime was externally managed.
- bool WasManaged = Dest.isLifetimeExternallyManaged();
+ // whether it was externally destructed.
+ bool wasExternallyDestructed = Dest.isExternallyDestructed();
Dest = EnsureSlot(E->getType());
- Dest.setLifetimeExternallyManaged();
+
+ // We're going to push a destructor if there isn't already one.
+ Dest.setExternallyDestructed();
Visit(E->getSubExpr());
- // Set up the temporary's destructor if its lifetime wasn't already
- // being managed.
- if (!WasManaged)
+ // Push that destructor we promised.
+ if (!wasExternallyDestructed)
CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}
@@ -596,7 +615,10 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
} else if (type->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
} else if (CGF.hasAggregateLLVMType(type)) {
- CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
Dest.isZeroed()));
} else if (LV.isSimple()) {
CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
@@ -647,9 +669,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- const llvm::PointerType *APType =
+ llvm::PointerType *APType =
cast<llvm::PointerType>(DestPtr->getType());
- const llvm::ArrayType *AType =
+ llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
uint64_t NumInitElements = E->getNumInits();
@@ -676,7 +698,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin");
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
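
The CreateInBoundsGEP change tracks the LLVM 3.0 IRBuilder migration from
(begin, end) iterator pairs to ArrayRef parameters, letting a fixed index
array be passed directly. A generic illustration with a toy ArrayRef-like
view, not the LLVM class:

    #include <cstddef>

    // Minimal ArrayRef-style view: a pointer plus a length, implicitly
    // constructible from a C array so call sites pass the array itself.
    template <typename T> struct Ref {
      const T *Data;
      std::size_t Size;
      template <std::size_t N> Ref(const T (&A)[N]) : Data(A), Size(N) {}
    };

    // Old overload took (Ptr, IdxBegin, IdxEnd); the new one takes a
    // single argument carrying both pointer and count.
    void createGEP(void *Ptr, Ref<int> Indices, const char *Name) {
      (void)Ptr; (void)Indices; (void)Name;  // illustration only
    }

    void example(void *DestPtr) {
      int indices[] = {0, 0};
      createGEP(DestPtr, indices, "arrayinit.begin");
    }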
@@ -839,7 +861,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We'll need to enter cleanup scopes in case any of the member
// initializers throw an exception.
- llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
@@ -948,7 +970,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
- CGF.getContext().Target.getPointerWidth(0));
+ CGF.getContext().getTargetInfo().getPointerWidth(0));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
@@ -999,7 +1021,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
Loc = CGF.Builder.CreateBitCast(Loc, BP);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
@@ -1036,7 +1058,9 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
return LV;
}
@@ -1049,7 +1073,9 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
- Record->hasTrivialCopyAssignment()) &&
+ Record->hasTrivialCopyAssignment() ||
+ Record->hasTrivialMoveConstructor() ||
+ Record->hasTrivialMoveAssignment()) &&
"Trying to aggregate-copy a type without a trivial copy "
"constructor or assignment operator");
// Ignore empty classes in C++.
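
The widened assertion admits C++11 trivial move operations: a memcpy-based
aggregate copy is just as valid when the move constructor or move
assignment is trivial. A standalone analogue using standard type traits,
my framing rather than clang's code:

    #include <cstring>
    #include <type_traits>

    // Raw-byte copy is legitimate only when some copy or move operation
    // of T is trivial, mirroring the four cases in the updated assert.
    template <typename T>
    void aggregateCopy(T *Dest, const T *Src) {
      static_assert(std::is_trivially_copy_constructible<T>::value ||
                    std::is_trivially_copy_assignable<T>::value ||
                    std::is_trivially_move_constructible<T>::value ||
                    std::is_trivially_move_assignable<T>::value,
                    "aggregate copy needs a trivial copy or move");
      std::memcpy(Dest, Src, sizeof(T));
    }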
@@ -1088,24 +1114,24 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- const llvm::Type *DBP =
+ llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ llvm::Type *DBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP);
- const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- const llvm::Type *SBP =
+ llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::Type *SBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
// Don't do any of the memmove_collectable tests if GC isn't set.
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
+ if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -1116,7 +1142,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal =
llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,