author     Dimitry Andric <dim@FreeBSD.org>  2022-10-15 16:01:12 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2022-10-15 16:01:12 +0000
commit     6befc758a38754c930daf51e1094f210afb70f25
tree       5b8466e9c648c3915ad18896362cd43885deca63
parent     7f04c68256951282ce8e0136371ef97410ed7db6

    Vendor import of llvm-project branch release/15.x llvmorg-15.0.2-0-g4bd3f3759259
    (vendor/llvm-project/llvmorg-15.0.2-0-g4bd3f3759259)
-rw-r--r--  clang/lib/AST/StmtPrinter.cpp                      |  2
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp                       | 15
-rw-r--r--  clang/lib/Sema/SemaInit.cpp                        |  8
-rw-r--r--  libcxx/include/__config                            | 33
-rw-r--r--  libcxx/include/stdatomic.h                         |  4
-rw-r--r--  lld/ELF/Arch/RISCV.cpp                             |  9
-rw-r--r--  llvm/include/llvm/ADT/GenericCycleImpl.h           | 29
-rw-r--r--  llvm/include/llvm/ADT/GenericCycleInfo.h           | 19
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                | 13
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp   |  3
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp   |  2
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp    |  4
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.cpp            |  2
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h              |  2
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp     |  5
15 files changed, 102 insertions(+), 48 deletions(-)
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 8d778500d103..625048c69a86 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -1993,7 +1993,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
assert(Args);
- if (Args->size() != 1) {
+ if (Args->size() != 1 || Args->get(0).getKind() != TemplateArgument::Pack) {
const TemplateParameterList *TPL = nullptr;
if (!DRE->hadMultipleCandidates())
if (const auto *TD = dyn_cast<TemplateDecl>(DRE->getDecl()))
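A hedged illustration of the two literal-operator shapes this printer check distinguishes (identifiers are invented; compile as C++17 or later). The fast path now requires the specialization's single template argument to be a character pack, as produced by a numeric literal operator template; a single non-pack argument, e.g. the class NTTP of a C++20 string literal operator template, falls back to the generic printing path.

```cpp
// Illustrative only; identifiers are invented for this sketch.
#include <cstddef>

// Numeric literal operator template: a specialization's single template
// argument is a pack of the literal's characters (42_digits -> <'4', '2'>),
// the shape the fast path in VisitUserDefinedLiteral expects.
template <char... Cs>
constexpr int operator""_digits() { return sizeof...(Cs); }

// Ordinary literal operator: no template arguments at all, so it never
// matched the fast path in the first place.
constexpr std::size_t operator""_len(const char *, std::size_t N) { return N; }

static_assert(42_digits == 2);
static_assert("ab"_len == 2);

int main() {}
```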
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 481438de0e53..9935fcc0d3ea 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -1509,6 +1509,21 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
llvm::ConstantInt *CaseVal =
Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+
+ // Emit debuginfo for the case value if it is an enum value.
+ const ConstantExpr *CE;
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
+ CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
+ else
+ CE = dyn_cast<ConstantExpr>(S.getLHS());
+ if (CE) {
+ if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo())
+ Dbg->EmitGlobalVariable(DE->getDecl(),
+ APValue(llvm::APSInt(CaseVal->getValue())));
+ }
+
if (SwitchLikelihood)
SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
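The new block peels an optional implicit cast off the case label, and if what remains is a ConstantExpr wrapping a DeclRefExpr (the usual shape of an enumerator used as a case label), it emits debug info for the referenced declaration together with the case's constant value, provided reduced debug info is enabled. A minimal source-level sketch of the construct this targets (invented names):

```cpp
// Illustrative source the new code path is aimed at: an enumerator used as a
// case label. The label expression is (possibly behind an implicit cast) a
// constant expression referring to the enum constant, whose declaration now
// gets debug info emitted for it.
enum Color { Red = 1, Green = 2, Blue = 4 };

int classify(Color c) {
  switch (c) {
  case Red:
    return 0;
  case Green:
    return 1;
  default:
    return -1;
  }
}

int main() { return classify(Green) == 1 ? 0 : 1; }
```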
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index d3b454843234..bf7ca718a36b 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -695,10 +695,10 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
// member of reference type uninitialized, the program is
// ill-formed.
SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized)
- << Field->getType()
- << ILE->getSyntacticForm()->getSourceRange();
- SemaRef.Diag(Field->getLocation(),
- diag::note_uninit_reference_member);
+ << Field->getType()
+ << (ILE->isSyntacticForm() ? ILE : ILE->getSyntacticForm())
+ ->getSourceRange();
+ SemaRef.Diag(Field->getLocation(), diag::note_uninit_reference_member);
}
hadError = true;
return;
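The rewrite uses the syntactic form of the initializer list for the source range when one exists, so the error points at the braces the user wrote. A minimal, intentionally ill-formed example (invented names) that triggers this diagnostic:

```cpp
// Minimal illustration of the diagnostic whose source range is being fixed.
// This program is intentionally ill-formed: leaving a reference member
// uninitialized in aggregate initialization must be diagnosed, and the error
// should point at the braces the user actually wrote.
struct Holder {
  int &ref;   // reference member with no initializer
  int value;
};

int main() {
  Holder h = {};  // error: reference member 'ref' left uninitialized
  (void)h;
}
```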
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 2f80df35f909..01377a9617ea 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -22,9 +22,21 @@
# pragma GCC system_header
#endif
+#if defined(__apple_build_version__)
+# define _LIBCPP_COMPILER_CLANG_BASED
+# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000)
+#elif defined(__clang__)
+# define _LIBCPP_COMPILER_CLANG_BASED
+# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__)
+#elif defined(__GNUC__)
+# define _LIBCPP_COMPILER_GCC
+#elif defined(_MSC_VER)
+# define _LIBCPP_COMPILER_MSVC
+#endif
+
#ifdef __cplusplus
-# define _LIBCPP_VERSION 15001
+# define _LIBCPP_VERSION 15002
# define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y
# define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y)
@@ -198,18 +210,6 @@
# define __has_include(...) 0
# endif
-# if defined(__apple_build_version__)
-# define _LIBCPP_COMPILER_CLANG_BASED
-# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000)
-# elif defined(__clang__)
-# define _LIBCPP_COMPILER_CLANG_BASED
-# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__)
-# elif defined(__GNUC__)
-# define _LIBCPP_COMPILER_GCC
-# elif defined(_MSC_VER)
-# define _LIBCPP_COMPILER_MSVC
-# endif
-
# if !defined(_LIBCPP_COMPILER_CLANG_BASED) && __cplusplus < 201103L
# error "libc++ only supports C++03 with Clang-based compilers. Please enable C++11"
# endif
@@ -1101,6 +1101,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
# define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION
# endif // _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES
+// Leave the deprecation notices in by default, but don't remove unary_function and
+// binary_function entirely just yet. That way, folks will have one release to act
+// on the deprecation warnings.
+# ifndef _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION
+# define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION
+# endif
+
# if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES)
# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION
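With the added block, libc++ 15.0.2 defines _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION by default, so std::unary_function and std::binary_function remain available (though deprecated) for one more release. A small sketch, assuming a build against libc++ 15.0.2 with -std=c++17, of the kind of legacy adaptor code this keeps compiling:

```cpp
// Illustrative: std::unary_function is still provided in C++17 mode for one
// more release (with a deprecation warning) because the header enables the
// removed-feature escape hatch by default.
#include <functional>

struct IsEven : std::unary_function<int, bool> {  // deprecated, but still present
  bool operator()(int x) const { return x % 2 == 0; }
};

int main() {
  IsEven pred;
  return pred(4) ? 0 : 1;
}
```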
diff --git a/libcxx/include/stdatomic.h b/libcxx/include/stdatomic.h
index d9550c44061c..ff2a5682f5bb 100644
--- a/libcxx/include/stdatomic.h
+++ b/libcxx/include/stdatomic.h
@@ -121,7 +121,7 @@ using std::atomic_signal_fence // see below
# pragma GCC system_header
#endif
-#if _LIBCPP_STD_VER > 20
+#if defined(__cplusplus) && _LIBCPP_STD_VER > 20
#include <atomic>
#include <version>
@@ -230,6 +230,6 @@ using std::atomic_thread_fence _LIBCPP_USING_IF_EXISTS;
# include_next <stdatomic.h>
# endif
-#endif // _LIBCPP_STD_VER > 20
+#endif // defined(__cplusplus) && _LIBCPP_STD_VER > 20
#endif // _LIBCPP_STDATOMIC_H
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 8fca1a686a79..56a516f9cdc1 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -750,12 +750,13 @@ void elf::riscvFinalizeRelax(int passes) {
p += size;
// For R_RISCV_ALIGN, we will place `offset` in a location (among NOPs)
- // to satisfy the alignment requirement. If `remove` is a multiple of 4,
- // it is as if we have skipped some NOPs. Otherwise we are in the middle
- // of a 4-byte NOP, and we need to rewrite the NOP sequence.
+ // to satisfy the alignment requirement. If both `remove` and r.addend
+ // are multiples of 4, it is as if we have skipped some NOPs. Otherwise
+ // we are in the middle of a 4-byte NOP, and we need to rewrite the NOP
+ // sequence.
int64_t skip = 0;
if (r.type == R_RISCV_ALIGN) {
- if (remove % 4 != 0) {
+ if (remove % 4 || r.addend % 4) {
skip = r.addend - remove;
int64_t j = 0;
for (; j + 4 <= skip; j += 4)
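The relaxed condition also rewrites the NOP padding when the R_RISCV_ALIGN addend itself is not a multiple of 4, not only when the removed byte count is. Below is a standalone sketch of the rewrite arithmetic, not lld's actual code (lld uses its little-endian write helpers); it assumes a little-endian host and that the surviving padding size is even:

```cpp
// Standalone sketch: re-encode the padding left behind by an R_RISCV_ALIGN
// relocation after relaxation removed `remove` of its `addend` padding bytes.
// The survivors become whole 4-byte nops followed by at most one 2-byte c.nop.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

static void rewriteAlignNops(uint8_t *p, int64_t addend, int64_t remove) {
  int64_t skip = addend - remove;     // padding bytes that survive relaxation
  int64_t j = 0;
  for (; j + 4 <= skip; j += 4) {
    const uint32_t nop = 0x00000013;  // addi x0, x0, 0
    std::memcpy(p + j, &nop, 4);
  }
  if (j != skip) {
    const uint16_t cnop = 0x0001;     // c.nop; with even padding, skip - j == 2
    std::memcpy(p + j, &cnop, 2);
  }
}

int main() {
  // addend = 6, remove = 0: remove % 4 == 0, yet the padding still has to be
  // rewritten as one 4-byte nop plus one c.nop, which the new condition catches.
  std::vector<uint8_t> pad(6, 0);
  rewriteAlignNops(pad.data(), 6, 0);
  for (uint8_t b : pad)
    std::printf("%02x ", b);
  std::printf("\n");  // 13 00 00 00 01 00
}
```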
diff --git a/llvm/include/llvm/ADT/GenericCycleImpl.h b/llvm/include/llvm/ADT/GenericCycleImpl.h
index ea2847f8c8ee..07ac1768ea27 100644
--- a/llvm/include/llvm/ADT/GenericCycleImpl.h
+++ b/llvm/include/llvm/ADT/GenericCycleImpl.h
@@ -144,8 +144,12 @@ private:
};
template <typename ContextT>
-auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(
- const BlockT *Block) const -> CycleT * {
+auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(BlockT *Block)
+ -> CycleT * {
+ auto Cycle = BlockMapTopLevel.find(Block);
+ if (Cycle != BlockMapTopLevel.end())
+ return Cycle->second;
+
auto MapIt = BlockMap.find(Block);
if (MapIt == BlockMap.end())
return nullptr;
@@ -153,12 +157,15 @@ auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(
auto *C = MapIt->second;
while (C->ParentCycle)
C = C->ParentCycle;
+ BlockMapTopLevel.try_emplace(Block, C);
return C;
}
template <typename ContextT>
-void GenericCycleInfo<ContextT>::moveToNewParent(CycleT *NewParent,
- CycleT *Child) {
+void GenericCycleInfo<ContextT>::moveTopLevelCycleToNewParent(CycleT *NewParent,
+ CycleT *Child) {
+ assert((!Child->ParentCycle && !NewParent->ParentCycle) &&
+ "NewParent and Child must be both top level cycle!\n");
auto &CurrentContainer =
Child->ParentCycle ? Child->ParentCycle->Children : TopLevelCycles;
auto Pos = llvm::find_if(CurrentContainer, [=](const auto &Ptr) -> bool {
@@ -169,6 +176,13 @@ void GenericCycleInfo<ContextT>::moveToNewParent(CycleT *NewParent,
*Pos = std::move(CurrentContainer.back());
CurrentContainer.pop_back();
Child->ParentCycle = NewParent;
+
+ NewParent->Blocks.insert(NewParent->Blocks.end(), Child->block_begin(),
+ Child->block_end());
+
+ for (auto &It : BlockMapTopLevel)
+ if (It.second == Child)
+ It.second = NewParent;
}
/// \brief Main function of the cycle info computations.
@@ -240,10 +254,7 @@ void GenericCycleInfoCompute<ContextT>::run(BlockT *EntryBlock) {
<< "discovered child cycle "
<< Info.Context.print(BlockParent->getHeader()) << "\n");
// Make BlockParent the child of NewCycle.
- Info.moveToNewParent(NewCycle.get(), BlockParent);
- NewCycle->Blocks.insert(NewCycle->Blocks.end(),
- BlockParent->block_begin(),
- BlockParent->block_end());
+ Info.moveTopLevelCycleToNewParent(NewCycle.get(), BlockParent);
for (auto *ChildEntry : BlockParent->entries())
ProcessPredecessors(ChildEntry);
@@ -257,6 +268,7 @@ void GenericCycleInfoCompute<ContextT>::run(BlockT *EntryBlock) {
assert(!is_contained(NewCycle->Blocks, Block));
NewCycle->Blocks.push_back(Block);
ProcessPredecessors(Block);
+ Info.BlockMapTopLevel.try_emplace(Block, NewCycle.get());
}
} while (!Worklist.empty());
@@ -336,6 +348,7 @@ void GenericCycleInfoCompute<ContextT>::dfs(BlockT *EntryBlock) {
template <typename ContextT> void GenericCycleInfo<ContextT>::clear() {
TopLevelCycles.clear();
BlockMap.clear();
+ BlockMapTopLevel.clear();
}
/// \brief Compute the cycle info for a function.
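The change memoizes the top-level parent lookup in a new BlockMapTopLevel map and keeps that cache consistent when a formerly top-level cycle is re-parented. A standalone sketch of the same memoization idea using invented, non-LLVM types:

```cpp
// Sketch of the caching scheme: remember each block's top-level ("root")
// cycle so repeated queries do not re-walk the parent chain, and patch the
// cache when a top-level cycle is given a new parent.
#include <cassert>
#include <unordered_map>

struct Cycle { Cycle *Parent = nullptr; };

struct CycleInfo {
  std::unordered_map<int, Cycle *> BlockMap;         // block -> innermost cycle
  std::unordered_map<int, Cycle *> BlockMapTopLevel; // block -> top-level cycle (cache)

  Cycle *getTopLevelParentCycle(int Block) {
    if (auto It = BlockMapTopLevel.find(Block); It != BlockMapTopLevel.end())
      return It->second;                    // cache hit
    auto MapIt = BlockMap.find(Block);
    if (MapIt == BlockMap.end())
      return nullptr;
    Cycle *C = MapIt->second;
    while (C->Parent)                       // walk to the root once
      C = C->Parent;
    BlockMapTopLevel.try_emplace(Block, C); // memoize the answer
    return C;
  }

  void moveTopLevelCycleToNewParent(Cycle *NewParent, Cycle *Child) {
    assert(!Child->Parent && !NewParent->Parent && "both must be top level");
    Child->Parent = NewParent;
    for (auto &Entry : BlockMapTopLevel)    // keep cached roots valid
      if (Entry.second == Child)
        Entry.second = NewParent;
  }
};

int main() {
  Cycle Inner, Outer;
  CycleInfo CI;
  CI.BlockMap = {{1, &Inner}};
  assert(CI.getTopLevelParentCycle(1) == &Inner);
  CI.moveTopLevelCycleToNewParent(&Outer, &Inner);
  assert(CI.getTopLevelParentCycle(1) == &Outer);
}
```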
diff --git a/llvm/include/llvm/ADT/GenericCycleInfo.h b/llvm/include/llvm/ADT/GenericCycleInfo.h
index 970664b85715..5f851b795cbc 100644
--- a/llvm/include/llvm/ADT/GenericCycleInfo.h
+++ b/llvm/include/llvm/ADT/GenericCycleInfo.h
@@ -232,15 +232,24 @@ public:
private:
ContextT Context;
- /// Map basic blocks to their inner-most containing loop.
+ /// Map basic blocks to their inner-most containing cycle.
DenseMap<BlockT *, CycleT *> BlockMap;
+ /// Map basic blocks to their top level containing cycle.
+ DenseMap<BlockT *, CycleT *> BlockMapTopLevel;
+
/// Outermost cycles discovered by any DFS.
///
/// Note: The implementation treats the nullptr as the parent of
/// every top-level cycle. See \ref contains for an example.
std::vector<std::unique_ptr<CycleT>> TopLevelCycles;
+ /// Move \p Child to \p NewParent by manipulating Children vectors.
+ ///
+ /// Note: This is an incomplete operation that does not update the depth of
+ /// the subtree.
+ void moveTopLevelCycleToNewParent(CycleT *NewParent, CycleT *Child);
+
public:
GenericCycleInfo() = default;
GenericCycleInfo(GenericCycleInfo &&) = default;
@@ -254,13 +263,7 @@ public:
CycleT *getCycle(const BlockT *Block) const;
unsigned getCycleDepth(const BlockT *Block) const;
- CycleT *getTopLevelParentCycle(const BlockT *Block) const;
-
- /// Move \p Child to \p NewParent by manipulating Children vectors.
- ///
- /// Note: This is an incomplete operation that does not update the
- /// list of blocks in the new parent or the depth of the subtree.
- void moveToNewParent(CycleT *NewParent, CycleT *Child);
+ CycleT *getTopLevelParentCycle(BlockT *Block);
/// Methods for debug and self-test.
//@{
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2dd671b4ab9e..569ee6b3ea86 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3593,14 +3593,23 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
// Unsigned integers are always nonnegative.
case Instruction::UIToFP:
return true;
- case Instruction::FMul:
case Instruction::FDiv:
- // X * X is always non-negative or a NaN.
// X / X is always exactly 1.0 or a NaN.
if (I->getOperand(0) == I->getOperand(1) &&
(!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
return true;
+ // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
+ return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
+ Depth + 1) &&
+ cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
+ /*SignBitOnly*/ true, Depth + 1);
+ case Instruction::FMul:
+ // X * X is always non-negative or a NaN.
+ if (I->getOperand(0) == I->getOperand(1) &&
+ (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
+ return true;
+
LLVM_FALLTHROUGH;
case Instruction::FAdd:
case Instruction::FRem:
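Splitting FDiv out of the FMul case lets the recursion demand a clear sign bit for the divisor (SignBitOnly = true), because a divisor that is merely "not ordered less than zero" can still be -0.0, and dividing by -0.0 flips the sign of the result. A tiny standalone check of that IEEE-754 fact:

```cpp
// Standalone check of the fact motivating the FDiv handling: -0.0 is not
// "less than zero" in the ordered sense, yet dividing by it produces a
// negative result (-inf), so the divisor must have a clear sign bit.
#include <cmath>
#include <cstdio>

int main() {
  double negzero = -0.0;
  std::printf("1.0 / -0.0 = %f\n", 1.0 / negzero);                 // -inf
  std::printf("signbit(1.0 / -0.0) = %d\n",
              std::signbit(1.0 / negzero));                        // 1
  std::printf("-0.0 < 0.0 is %d\n", negzero < 0.0);                // 0
}
```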
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index bd0232c71d48..f37c50900adb 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -149,6 +149,7 @@ private:
Align H)
: Inst(I), Addr(A), ValTy(T), HaveAlign(H),
NeedAlign(HVC.getTypeAlignment(ValTy)) {}
+ AddrInfo &operator=(const AddrInfo &) = default;
// XXX: add Size member?
Instruction *Inst;
@@ -185,6 +186,7 @@ private:
Segment(Value *Val, int Begin, int Len)
: Val(Val), Start(Begin), Size(Len) {}
Segment(const Segment &Seg) = default;
+ Segment &operator=(const Segment &Seg) = default;
Value *Val; // Value representable as a sequence of bytes.
int Start; // First byte of the value that belongs to the segment.
int Size; // Number of bytes in the segment.
@@ -195,6 +197,7 @@ private:
Block(Value *Val, int Off, int Len, int Pos)
: Seg(Val, Off, Len), Pos(Pos) {}
Block(const Block &Blk) = default;
+ Block &operator=(const Block &Blk) = default;
Segment Seg; // Value segment.
int Pos; // Position (offset) of the segment in the Block.
};
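These structs declare copy constructors, which makes reliance on the implicitly generated copy assignment operator deprecated; explicitly defaulting operator= keeps them copy-assignable without -Wdeprecated-copy warnings. A minimal sketch of the pattern with an invented type:

```cpp
// Minimal illustration: a user-declared copy constructor deprecates use of
// the implicitly-declared copy assignment operator, which newer compilers
// flag with -Wdeprecated-copy; defaulting operator= explicitly silences it.
struct Segment {
  Segment(int Begin, int Len) : Start(Begin), Size(Len) {}
  Segment(const Segment &) = default;            // user-declared copy ctor
  Segment &operator=(const Segment &) = default; // now declared explicitly
  int Start;
  int Size;
};

int main() {
  Segment a(0, 4), b(4, 8);
  a = b;  // fine, and no -Wdeprecated-copy warning
  return a.Start == 4 ? 0 : 1;
}
```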
diff --git a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
index 1926977ea66e..2e426bb79cae 100644
--- a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
@@ -70,6 +70,7 @@ void SPIRVGeneralDuplicatesTracker::buildDepsGraph(
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
if (MMI) {
const Module *M = MMI->getModule();
for (auto F = M->begin(), E = M->end(); F != E; ++F) {
@@ -92,4 +93,5 @@ void SPIRVGeneralDuplicatesTracker::buildDepsGraph(
}
}
}
+#endif
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2cdae028ec7d..5fd4e45d80fb 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8154,7 +8154,7 @@ VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI(
Phi, Operands[0], *II, *PSE.getSE(),
LoopVectorizationPlanner::getDecisionAndClampRange(
[&](ElementCount VF) {
- return !VF.isScalable() && CM.isScalarAfterVectorization(Phi, VF);
+ return CM.isScalarAfterVectorization(Phi, VF);
},
Range));
}
@@ -9338,7 +9338,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
auto *IVR = getParent()->getPlan()->getCanonicalIV();
PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
- if (onlyScalarsGenerated()) {
+ if (onlyScalarsGenerated(State.VF)) {
// This is the normalized GEP that starts counting at zero.
Value *PtrInd = State.Builder.CreateSExtOrTrunc(
CanonicalIV, IndDesc.getStep()->getType());
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 0662ca883252..30032dda7f60 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -698,7 +698,7 @@ void VPlan::execute(VPTransformState *State) {
auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
// TODO: Split off the case that all users of a pointer phi are scalar
// from the VPWidenPointerInductionRecipe.
- if (WidenPhi->onlyScalarsGenerated())
+ if (WidenPhi->onlyScalarsGenerated(State->VF))
continue;
auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 329843bf977d..7a6bc48e2aee 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1220,7 +1220,7 @@ public:
void execute(VPTransformState &State) override;
/// Returns true if only scalar values will be generated.
- bool onlyScalarsGenerated();
+ bool onlyScalarsGenerated(ElementCount VF);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index f214563af6cc..9a404640b4b8 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -982,8 +982,9 @@ void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
}
#endif
-bool VPWidenPointerInductionRecipe::onlyScalarsGenerated() {
- return IsScalarAfterVectorization;
+bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(ElementCount VF) {
+ return IsScalarAfterVectorization &&
+ (!VF.isScalable() || vputils::onlyFirstLaneUsed(this));
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)