aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/IR/Verifier.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/IR/Verifier.cpp')
-rw-r--r--llvm/lib/IR/Verifier.cpp514
1 files changed, 419 insertions, 95 deletions
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 83e42bc184ff..1408ce293ca6 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -38,6 +38,11 @@
// * A landingpad instruction must be the first non-PHI instruction in the
// block.
// * Landingpad instructions must be in a function with a personality function.
+// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
+// The applied restrictions are too numerous to list here.
+// * The convergence entry intrinsic and the loop heart must be the first
+// non-PHI instruction in their respective block. This does not conflict with
+// the landing pads, since these two kinds cannot occur in the same block.
// * All other things that are tested by asserts spread about the code...
//
//===----------------------------------------------------------------------===//
@@ -48,6 +53,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
@@ -58,6 +64,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
+#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -66,12 +73,14 @@
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/CycleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/GlobalAlias.h"
@@ -85,6 +94,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
@@ -220,6 +230,8 @@ private:
AL->print(*OS);
}
+ void Write(Printable P) { *OS << P << '\n'; }
+
template <typename T> void Write(ArrayRef<T> Vs) {
for (const T &V : Vs)
Write(V);
@@ -317,6 +329,13 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// The current source language.
dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
+ /// Whether the current function has convergencectrl operand bundles.
+ enum {
+ ControlledConvergence,
+ UncontrolledConvergence,
+ NoConvergence
+ } ConvergenceKind = NoConvergence;
+
/// Whether source was present on the first DIFile encountered in each CU.
DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
@@ -328,6 +347,10 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
// terminators that indicate the unwind, used to detect cycles therein.
MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
+ /// Cache which blocks are in which funclet, if an EH funclet personality is
+ /// in use. Otherwise empty.
+ DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
+
/// Cache of constants visited in search of ConstantExprs.
SmallPtrSet<const Constant *, 32> ConstantExprVisited;
@@ -392,6 +415,8 @@ public:
// FIXME: We strip const here because the inst visitor strips const.
visit(const_cast<Function &>(F));
verifySiblingFuncletUnwinds();
+ if (ConvergenceKind == ControlledConvergence)
+ verifyConvergenceControl(const_cast<Function &>(F));
InstsInThisBlock.clear();
DebugFnArgs.clear();
LandingPadResultTy = nullptr;
@@ -399,6 +424,7 @@ public:
SiblingFuncletInfo.clear();
verifyNoAliasScopeDecl();
NoAliasScopeDecls.clear();
+ ConvergenceKind = NoConvergence;
return !Broken;
}
@@ -467,6 +493,8 @@ private:
void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
void visitFunction(const Function &F);
void visitBasicBlock(BasicBlock &BB);
+ void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
+ bool IsAbsoluteSymbol);
void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
void visitProfMetadata(Instruction &I, MDNode *MD);
@@ -572,6 +600,7 @@ private:
void verifyStatepoint(const CallBase &Call);
void verifyFrameRecoverIndices();
void verifySiblingFuncletUnwinds();
+ void verifyConvergenceControl(Function &F);
void verifyFragmentExpression(const DbgVariableIntrinsic &I);
template <typename ValueOrMetadata>
@@ -653,7 +682,37 @@ void Verifier::visitGlobalValue(const GlobalValue &GV) {
Check(A->value() <= Value::MaximumAlignment,
"huge alignment values are unsupported", GO);
}
+
+ if (const MDNode *Associated =
+ GO->getMetadata(LLVMContext::MD_associated)) {
+ Check(Associated->getNumOperands() == 1,
+ "associated metadata must have one operand", &GV, Associated);
+ const Metadata *Op = Associated->getOperand(0).get();
+ Check(Op, "associated metadata must have a global value", GO, Associated);
+
+ const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
+ Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
+ if (VM) {
+ Check(isa<PointerType>(VM->getValue()->getType()),
+ "associated value must be pointer typed", GV, Associated);
+
+ const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
+ Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
+ "associated metadata must point to a GlobalObject", GO, Stripped);
+ Check(Stripped != GO,
+ "global values should not associate to themselves", GO,
+ Associated);
+ }
+ }
+
+ // FIXME: Why is getMetadata on GlobalValue protected?
+ if (const MDNode *AbsoluteSymbol =
+ GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
+ verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
+ true);
+ }
}
+
Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
"Only global variables can have appending linkage!", &GV);
@@ -748,10 +807,8 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
"the third field of the element type is mandatory, "
"specify ptr null to migrate from the obsoleted 2-field form");
Type *ETy = STy->getTypeAtIndex(2);
- Type *Int8Ty = Type::getInt8Ty(ETy->getContext());
- Check(ETy->isPointerTy() &&
- cast<PointerType>(ETy)->isOpaqueOrPointeeTypeMatches(Int8Ty),
- "wrong type for intrinsic global variable", &GV);
+ Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
+ &GV);
}
}
@@ -801,9 +858,11 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
Check(!isa<ScalableVectorType>(GV.getValueType()),
"Globals cannot contain scalable vectors", &GV);
- if (auto *STy = dyn_cast<StructType>(GV.getValueType()))
- Check(!STy->containsScalableVectorType(),
+ if (auto *STy = dyn_cast<StructType>(GV.getValueType())) {
+ SmallPtrSet<Type *, 4> Visited;
+ Check(!STy->containsScalableVectorType(&Visited),
"Globals cannot contain scalable vectors", &GV);
+ }
// Check if it's a target extension type that disallows being used as a
// global.
@@ -1048,8 +1107,8 @@ void Verifier::visitDISubrange(const DISubrange &N) {
isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
"Count must be signed constant or DIVariable or DIExpression", &N);
auto Count = N.getCount();
- CheckDI(!Count || !Count.is<ConstantInt *>() ||
- Count.get<ConstantInt *>()->getSExtValue() >= -1,
+ CheckDI(!Count || !isa<ConstantInt *>(Count) ||
+ cast<ConstantInt *>(Count)->getSExtValue() >= -1,
"invalid subrange count", &N);
auto *LBound = N.getRawLowerBound();
CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
@@ -1354,9 +1413,11 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
auto *Node = dyn_cast<MDTuple>(RawNode);
CheckDI(Node, "invalid retained nodes list", &N, RawNode);
for (Metadata *Op : Node->operands()) {
- CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
- "invalid retained nodes, expected DILocalVariable or DILabel", &N,
- Node, Op);
+ CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
+ isa<DIImportedEntity>(Op)),
+ "invalid retained nodes, expected DILocalVariable, DILabel or "
+ "DIImportedEntity",
+ &N, Node, Op);
}
}
CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
@@ -1373,6 +1434,8 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
} else {
// Subprogram declarations (part of the type hierarchy).
CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
+ CheckDI(!N.getRawDeclaration(),
+ "subprogram declaration must not have a declaration field");
}
if (auto *RawThrownTypes = N.getRawThrownTypes()) {
@@ -1875,7 +1938,7 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
}
}
- if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (isa<PointerType>(Ty)) {
if (Attrs.hasAttribute(Attribute::ByVal)) {
if (Attrs.hasAttribute(Attribute::Alignment)) {
Align AttrAlign = Attrs.getAlignment().valueOrOne();
@@ -1902,38 +1965,14 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
Check(Attrs.getPreallocatedType()->isSized(&Visited),
"Attribute 'preallocated' does not support unsized types!", V);
}
- if (!PTy->isOpaque()) {
- if (!isa<PointerType>(PTy->getNonOpaquePointerElementType()))
- Check(!Attrs.hasAttribute(Attribute::SwiftError),
- "Attribute 'swifterror' only applies to parameters "
- "with pointer to pointer type!",
- V);
- if (Attrs.hasAttribute(Attribute::ByRef)) {
- Check(Attrs.getByRefType() == PTy->getNonOpaquePointerElementType(),
- "Attribute 'byref' type does not match parameter!", V);
- }
-
- if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
- Check(Attrs.getByValType() == PTy->getNonOpaquePointerElementType(),
- "Attribute 'byval' type does not match parameter!", V);
- }
-
- if (Attrs.hasAttribute(Attribute::Preallocated)) {
- Check(Attrs.getPreallocatedType() ==
- PTy->getNonOpaquePointerElementType(),
- "Attribute 'preallocated' type does not match parameter!", V);
- }
-
- if (Attrs.hasAttribute(Attribute::InAlloca)) {
- Check(Attrs.getInAllocaType() == PTy->getNonOpaquePointerElementType(),
- "Attribute 'inalloca' type does not match parameter!", V);
- }
+ }
- if (Attrs.hasAttribute(Attribute::ElementType)) {
- Check(Attrs.getElementType() == PTy->getNonOpaquePointerElementType(),
- "Attribute 'elementtype' type does not match parameter!", V);
- }
- }
+ if (Attrs.hasAttribute(Attribute::NoFPClass)) {
+ uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
+ Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
+ V);
+ Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
+ "Invalid value for 'nofpclass' test mask", V);
}
}
@@ -2142,10 +2181,13 @@ void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
if (VScaleMin == 0)
CheckFailed("'vscale_range' minimum must be greater than 0", V);
-
+ else if (!isPowerOf2_32(VScaleMin))
+ CheckFailed("'vscale_range' minimum must be power-of-two value", V);
std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
if (VScaleMax && VScaleMin > VScaleMax)
CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
+ else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
+ CheckFailed("'vscale_range' maximum must be power-of-two value", V);
}
if (Attrs.hasFnAttr("frame-pointer")) {
@@ -2484,6 +2526,118 @@ void Verifier::verifySiblingFuncletUnwinds() {
}
}
+void Verifier::verifyConvergenceControl(Function &F) {
+ DenseMap<BasicBlock *, SmallVector<CallBase *, 8>> LiveTokenMap;
+ DenseMap<const Cycle *, const CallBase *> CycleHearts;
+
+ // Just like the DominatorTree, compute the CycleInfo locally so that we
+ // can run the verifier outside of a pass manager and we don't rely on
+ // potentially out-of-date analysis results.
+ CycleInfo CI;
+ CI.compute(F);
+
+ auto checkBundle = [&](OperandBundleUse &Bundle, CallBase *CB,
+ SmallVectorImpl<CallBase *> &LiveTokens) {
+ Check(Bundle.Inputs.size() == 1 && Bundle.Inputs[0]->getType()->isTokenTy(),
+ "The 'convergencectrl' bundle requires exactly one token use.", CB);
+
+ Value *Token = Bundle.Inputs[0].get();
+ auto *Def = dyn_cast<CallBase>(Token);
+ Check(Def != nullptr,
+ "Convergence control tokens can only be produced by call "
+ "instructions.",
+ Token);
+
+ Check(llvm::is_contained(LiveTokens, Token),
+ "Convergence region is not well-nested.", Token, CB);
+
+ while (LiveTokens.back() != Token)
+ LiveTokens.pop_back();
+
+ // Check static rules about cycles.
+ auto *BB = CB->getParent();
+ auto *BBCycle = CI.getCycle(BB);
+ if (!BBCycle)
+ return;
+
+ BasicBlock *DefBB = Def->getParent();
+ if (DefBB == BB || BBCycle->contains(DefBB)) {
+ // degenerate occurrence of a loop intrinsic
+ return;
+ }
+
+ auto *II = dyn_cast<IntrinsicInst>(CB);
+ Check(II &&
+ II->getIntrinsicID() == Intrinsic::experimental_convergence_loop,
+ "Convergence token used by an instruction other than "
+ "llvm.experimental.convergence.loop in a cycle that does "
+ "not contain the token's definition.",
+ CB, CI.print(BBCycle));
+
+ while (true) {
+ auto *Parent = BBCycle->getParentCycle();
+ if (!Parent || Parent->contains(DefBB))
+ break;
+ BBCycle = Parent;
+ };
+
+ Check(BBCycle->isReducible() && BB == BBCycle->getHeader(),
+ "Cycle heart must dominate all blocks in the cycle.", CB, BB,
+ CI.print(BBCycle));
+ Check(!CycleHearts.count(BBCycle),
+ "Two static convergence token uses in a cycle that does "
+ "not contain either token's definition.",
+ CB, CycleHearts[BBCycle], CI.print(BBCycle));
+ CycleHearts[BBCycle] = CB;
+ };
+
+ ReversePostOrderTraversal<Function *> RPOT(&F);
+ SmallVector<CallBase *, 8> LiveTokens;
+ for (BasicBlock *BB : RPOT) {
+ LiveTokens.clear();
+ auto LTIt = LiveTokenMap.find(BB);
+ if (LTIt != LiveTokenMap.end()) {
+ LiveTokens = std::move(LTIt->second);
+ LiveTokenMap.erase(LTIt);
+ }
+
+ for (Instruction &I : *BB) {
+ CallBase *CB = dyn_cast<CallBase>(&I);
+ if (!CB)
+ continue;
+
+ auto Bundle = CB->getOperandBundle(LLVMContext::OB_convergencectrl);
+ if (Bundle)
+ checkBundle(*Bundle, CB, LiveTokens);
+
+ if (CB->getType()->isTokenTy())
+ LiveTokens.push_back(CB);
+ }
+
+ // Propagate token liveness
+ for (BasicBlock *Succ : successors(BB)) {
+ DomTreeNode *SuccNode = DT.getNode(Succ);
+ LTIt = LiveTokenMap.find(Succ);
+ if (LTIt == LiveTokenMap.end()) {
+ // We're the first predecessor: all tokens which dominate the
+ // successor are live for now.
+ LTIt = LiveTokenMap.try_emplace(Succ).first;
+ for (CallBase *LiveToken : LiveTokens) {
+ if (!DT.dominates(DT.getNode(LiveToken->getParent()), SuccNode))
+ break;
+ LTIt->second.push_back(LiveToken);
+ }
+ } else {
+ // Compute the intersection of live tokens.
+ auto It = llvm::partition(LTIt->second, [&LiveTokens](CallBase *Token) {
+ return llvm::is_contained(LiveTokens, Token);
+ });
+ LTIt->second.erase(It, LTIt->second.end());
+ }
+ }
+ }
+}
+
// visitFunction - Verify that a function is ok.
//
void Verifier::visitFunction(const Function &F) {
@@ -2540,6 +2694,8 @@ void Verifier::visitFunction(const Function &F) {
}
case CallingConv::AMDGPU_KERNEL:
case CallingConv::SPIR_KERNEL:
+ case CallingConv::AMDGPU_CS_Chain:
+ case CallingConv::AMDGPU_CS_ChainPreserve:
Check(F.getReturnType()->isVoidTy(),
"Calling convention requires void return type", &F);
[[fallthrough]];
@@ -2630,6 +2786,9 @@ void Verifier::visitFunction(const Function &F) {
F.getParent(), Per, Per->getParent());
}
+ // EH funclet coloring can be expensive, recompute on-demand
+ BlockEHFuncletColors.clear();
+
if (F.isMaterializable()) {
// Function has a body somewhere we can't see.
Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
@@ -3207,14 +3366,23 @@ void Verifier::visitPHINode(PHINode &PN) {
visitInstruction(PN);
}
+static bool isControlledConvergent(const CallBase &Call) {
+ if (Call.getOperandBundle(LLVMContext::OB_convergencectrl))
+ return true;
+ if (const auto *F = dyn_cast<Function>(Call.getCalledOperand())) {
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::experimental_convergence_anchor:
+ case Intrinsic::experimental_convergence_entry:
+ case Intrinsic::experimental_convergence_loop:
+ return true;
+ }
+ }
+ return false;
+}
+
void Verifier::visitCallBase(CallBase &Call) {
Check(Call.getCalledOperand()->getType()->isPointerTy(),
"Called function must be a pointer!", Call);
- PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
-
- Check(FPTy->isOpaqueOrPointeeTypeMatches(Call.getFunctionType()),
- "Called function is not the same type as the call!", Call);
-
FunctionType *FTy = Call.getFunctionType();
// Verify that the correct number of arguments are being passed
@@ -3243,6 +3411,15 @@ void Verifier::visitCallBase(CallBase &Call) {
Check(Callee->getValueType() == FTy,
"Intrinsic called with incompatible signature", Call);
+ // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
+ // convention.
+ auto CC = Call.getCallingConv();
+ Check(CC != CallingConv::AMDGPU_CS_Chain &&
+ CC != CallingConv::AMDGPU_CS_ChainPreserve,
+ "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
+ "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
+ Call);
+
auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
if (!Ty->isSized())
return;
@@ -3496,6 +3673,23 @@ void Verifier::visitCallBase(CallBase &Call) {
if (Call.isInlineAsm())
verifyInlineAsmCall(Call);
+ if (isControlledConvergent(Call)) {
+ Check(Call.isConvergent(),
+ "Expected convergent attribute on a controlled convergent call.",
+ Call);
+ Check(ConvergenceKind != UncontrolledConvergence,
+ "Cannot mix controlled and uncontrolled convergence in the same "
+ "function.",
+ Call);
+ ConvergenceKind = ControlledConvergence;
+ } else if (Call.isConvergent()) {
+ Check(ConvergenceKind != ControlledConvergence,
+ "Cannot mix controlled and uncontrolled convergence in the same "
+ "function.",
+ Call);
+ ConvergenceKind = UncontrolledConvergence;
+ }
+
visitInstruction(Call);
}
@@ -3796,6 +3990,14 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
"GEP base pointer is not a vector or a vector of pointers", &GEP);
Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
+ if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
+ SmallPtrSet<Type *, 4> Visited;
+ Check(!STy->containsScalableVectorType(&Visited),
+ "getelementptr cannot target structure that contains scalable vector"
+ "type",
+ &GEP);
+ }
+
SmallVector<Value *, 16> Idxs(GEP.indices());
Check(
all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
@@ -3839,10 +4041,10 @@ static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
}
-void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
- assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
- "precondition violation");
-
+/// Verify !range and !absolute_symbol metadata. These have the same
+/// restrictions, except !absolute_symbol allows the full set.
+void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
+ Type *Ty, bool IsAbsoluteSymbol) {
unsigned NumOperands = Range->getNumOperands();
Check(NumOperands % 2 == 0, "Unfinished range!", Range);
unsigned NumRanges = NumOperands / 2;
@@ -3856,13 +4058,20 @@ void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
ConstantInt *High =
mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
Check(High, "The upper limit must be an integer!", High);
- Check(High->getType() == Low->getType() && High->getType() == Ty,
+ Check(High->getType() == Low->getType() &&
+ High->getType() == Ty->getScalarType(),
"Range types must match instruction type!", &I);
APInt HighV = High->getValue();
APInt LowV = Low->getValue();
+
+ // ConstantRange asserts if the ranges are the same except for the min/max
+ // value. Leave the cases it tolerates for the empty range error below.
+ Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
+ "The upper and lower limits cannot be the same value", &I);
+
ConstantRange CurRange(LowV, HighV);
- Check(!CurRange.isEmptySet() && !CurRange.isFullSet(),
+ Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
"Range must not be empty!", Range);
if (i != 0) {
Check(CurRange.intersectWith(LastRange).isEmptySet(),
@@ -3887,6 +4096,12 @@ void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
}
}
+void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
+ assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
+ "precondition violation");
+ verifyRangeMetadata(I, Range, Ty, false);
+}
+
void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
unsigned Size = DL.getTypeSizeInBits(Ty);
Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
@@ -3924,8 +4139,6 @@ void Verifier::visitStoreInst(StoreInst &SI) {
PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
Check(PTy, "Store operand must be a pointer.", &SI);
Type *ElTy = SI.getOperand(0)->getType();
- Check(PTy->isOpaqueOrPointeeTypeMatches(ElTy),
- "Stored value type does not match pointer operand type!", &SI, ElTy);
if (MaybeAlign A = SI.getAlign()) {
Check(A->value() <= Value::MaximumAlignment,
"huge alignment values are unsupported", &SI);
@@ -4637,8 +4850,15 @@ void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
Check(Annotation->getNumOperands() >= 1,
"annotation must have at least one operand");
- for (const MDOperand &Op : Annotation->operands())
- Check(isa<MDString>(Op.get()), "operands must be strings");
+ for (const MDOperand &Op : Annotation->operands()) {
+ bool TupleOfStrings =
+ isa<MDTuple>(Op.get()) &&
+ all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
+ return isa<MDString>(Annotation.get());
+ });
+ Check(isa<MDString>(Op.get()) || TupleOfStrings,
+ "operands must be a string or a tuple of strings");
+ }
}
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
@@ -5038,7 +5258,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
case Intrinsic::is_fpclass: {
const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
- Check((TestMask->getZExtValue() & ~fcAllFlags) == 0,
+ Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
"unsupported bits for llvm.is.fpclass test mask");
break;
}
@@ -5076,9 +5296,6 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
"invalid llvm.dbg.declare intrinsic call 1", Call);
visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
break;
- case Intrinsic::dbg_addr: // llvm.dbg.addr
- visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
- break;
case Intrinsic::dbg_value: // llvm.dbg.value
visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
break;
@@ -5414,11 +5631,16 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Call);
break;
}
+ case Intrinsic::experimental_get_vector_length: {
+ ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
+ Check(!VF->isNegative() && !VF->isZero(),
+ "get_vector_length: VF must be positive", Call);
+ break;
+ }
case Intrinsic::masked_load: {
Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
Call);
- Value *Ptr = Call.getArgOperand(0);
ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
Value *Mask = Call.getArgOperand(2);
Value *PassThru = Call.getArgOperand(3);
@@ -5426,10 +5648,6 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Call);
Check(Alignment->getValue().isPowerOf2(),
"masked_load: alignment must be a power of 2", Call);
-
- PointerType *PtrTy = cast<PointerType>(Ptr->getType());
- Check(PtrTy->isOpaqueOrPointeeTypeMatches(Call.getType()),
- "masked_load: return must match pointer type", Call);
Check(PassThru->getType() == Call.getType(),
"masked_load: pass through and return type must match", Call);
Check(cast<VectorType>(Mask->getType())->getElementCount() ==
@@ -5439,17 +5657,12 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
case Intrinsic::masked_store: {
Value *Val = Call.getArgOperand(0);
- Value *Ptr = Call.getArgOperand(1);
ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
Value *Mask = Call.getArgOperand(3);
Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
Call);
Check(Alignment->getValue().isPowerOf2(),
"masked_store: alignment must be a power of 2", Call);
-
- PointerType *PtrTy = cast<PointerType>(Ptr->getType());
- Check(PtrTy->isOpaqueOrPointeeTypeMatches(Val->getType()),
- "masked_store: storee must match pointer type", Call);
Check(cast<VectorType>(Mask->getType())->getElementCount() ==
cast<VectorType>(Val->getType())->getElementCount(),
"masked_store: vector mask must be same length as value", Call);
@@ -5600,15 +5813,28 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Type *Op0ElemTy = nullptr;
Type *Op1ElemTy = nullptr;
switch (ID) {
- case Intrinsic::matrix_multiply:
+ case Intrinsic::matrix_multiply: {
NumRows = cast<ConstantInt>(Call.getArgOperand(2));
+ ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
+ Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
+ ->getNumElements() ==
+ NumRows->getZExtValue() * N->getZExtValue(),
+ "First argument of a matrix operation does not match specified "
+ "shape!");
+ Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
+ ->getNumElements() ==
+ N->getZExtValue() * NumColumns->getZExtValue(),
+ "Second argument of a matrix operation does not match specified "
+ "shape!");
+
ResultTy = cast<VectorType>(Call.getType());
Op0ElemTy =
cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
Op1ElemTy =
cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
break;
+ }
case Intrinsic::matrix_transpose:
NumRows = cast<ConstantInt>(Call.getArgOperand(1));
NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
@@ -5621,11 +5847,6 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
NumRows = cast<ConstantInt>(Call.getArgOperand(3));
NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
ResultTy = cast<VectorType>(Call.getType());
-
- PointerType *Op0PtrTy =
- cast<PointerType>(Call.getArgOperand(0)->getType());
- if (!Op0PtrTy->isOpaque())
- Op0ElemTy = Op0PtrTy->getNonOpaquePointerElementType();
break;
}
case Intrinsic::matrix_column_major_store: {
@@ -5635,11 +5856,6 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
Op0ElemTy =
cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
-
- PointerType *Op1PtrTy =
- cast<PointerType>(Call.getArgOperand(1)->getType());
- if (!Op1PtrTy->isOpaque())
- Op1ElemTy = Op1PtrTy->getNonOpaquePointerElementType();
break;
}
default:
@@ -5794,7 +6010,102 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
"isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
break;
}
+ case Intrinsic::callbr_landingpad: {
+ const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
+ Check(CBR, "intrinstic requires callbr operand", &Call);
+ if (!CBR)
+ break;
+
+ const BasicBlock *LandingPadBB = Call.getParent();
+ const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
+ if (!PredBB) {
+ CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
+ break;
+ }
+ if (!isa<CallBrInst>(PredBB->getTerminator())) {
+ CheckFailed("Intrinsic must have corresponding callbr in predecessor",
+ &Call);
+ break;
+ }
+ Check(llvm::any_of(CBR->getIndirectDests(),
+ [LandingPadBB](const BasicBlock *IndDest) {
+ return IndDest == LandingPadBB;
+ }),
+ "Intrinsic's corresponding callbr must have intrinsic's parent basic "
+ "block in indirect destination list",
+ &Call);
+ const Instruction &First = *LandingPadBB->begin();
+ Check(&First == &Call, "No other instructions may proceed intrinsic",
+ &Call);
+ break;
+ }
+ case Intrinsic::amdgcn_cs_chain: {
+ auto CallerCC = Call.getCaller()->getCallingConv();
+ switch (CallerCC) {
+ case CallingConv::AMDGPU_CS:
+ case CallingConv::AMDGPU_CS_Chain:
+ case CallingConv::AMDGPU_CS_ChainPreserve:
+ break;
+ default:
+ CheckFailed("Intrinsic can only be used from functions with the "
+ "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
+ "calling conventions",
+ &Call);
+ break;
+ }
+ break;
+ }
+ case Intrinsic::experimental_convergence_entry:
+ Check(Call.getFunction()->isConvergent(),
+ "Entry intrinsic can occur only in a convergent function.", &Call);
+ Check(Call.getParent()->isEntryBlock(),
+ "Entry intrinsic must occur in the entry block.", &Call);
+ Check(Call.getParent()->getFirstNonPHI() == &Call,
+ "Entry intrinsic must occur at the start of the basic block.", &Call);
+ LLVM_FALLTHROUGH;
+ case Intrinsic::experimental_convergence_anchor:
+ Check(!Call.getOperandBundle(LLVMContext::OB_convergencectrl),
+ "Entry or anchor intrinsic must not have a convergencectrl bundle.",
+ &Call);
+ break;
+ case Intrinsic::experimental_convergence_loop:
+ Check(Call.getOperandBundle(LLVMContext::OB_convergencectrl),
+ "Loop intrinsic must have a convergencectrl bundle.", &Call);
+ Check(Call.getParent()->getFirstNonPHI() == &Call,
+ "Loop intrinsic must occur at the start of the basic block.", &Call);
+ break;
};
+
+ // Verify that there aren't any unmediated control transfers between funclets.
+ if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
+ Function *F = Call.getParent()->getParent();
+ if (F->hasPersonalityFn() &&
+ isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
+ // Run EH funclet coloring on-demand and cache results for other intrinsic
+ // calls in this function
+ if (BlockEHFuncletColors.empty())
+ BlockEHFuncletColors = colorEHFunclets(*F);
+
+ // Check for catch-/cleanup-pad in first funclet block
+ bool InEHFunclet = false;
+ BasicBlock *CallBB = Call.getParent();
+ const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
+ assert(CV.size() > 0 && "Uncolored block");
+ for (BasicBlock *ColorFirstBB : CV)
+ if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
+ InEHFunclet = true;
+
+ // Check for funclet operand bundle
+ bool HasToken = false;
+ for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
+ if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
+ HasToken = true;
+
+ // This would cause silent code truncation in WinEHPrepare
+ if (InEHFunclet)
+ Check(HasToken, "Missing funclet token on intrinsic call", &Call);
+ }
+ }
}
/// Carefully grab the subprogram from a local scope.
@@ -5961,20 +6272,20 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
case Intrinsic::experimental_constrained_fptosi:
case Intrinsic::experimental_constrained_fptoui: {
Value *Operand = FPI.getArgOperand(0);
- uint64_t NumSrcElem = 0;
+ ElementCount SrcEC;
Check(Operand->getType()->isFPOrFPVectorTy(),
"Intrinsic first argument must be floating point", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ SrcEC = cast<VectorType>(OperandT)->getElementCount();
}
Operand = &FPI;
- Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
"Intrinsic first argument and result disagree on vector use", &FPI);
Check(Operand->getType()->isIntOrIntVectorTy(),
"Intrinsic result must be an integer", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
"Intrinsic first argument and result vector lengths must be equal",
&FPI);
}
@@ -5984,20 +6295,20 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
case Intrinsic::experimental_constrained_sitofp:
case Intrinsic::experimental_constrained_uitofp: {
Value *Operand = FPI.getArgOperand(0);
- uint64_t NumSrcElem = 0;
+ ElementCount SrcEC;
Check(Operand->getType()->isIntOrIntVectorTy(),
"Intrinsic first argument must be integer", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ SrcEC = cast<VectorType>(OperandT)->getElementCount();
}
Operand = &FPI;
- Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
"Intrinsic first argument and result disagree on vector use", &FPI);
Check(Operand->getType()->isFPOrFPVectorTy(),
"Intrinsic result must be a floating point", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
"Intrinsic first argument and result vector lengths must be equal",
&FPI);
}
@@ -6016,8 +6327,8 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
"Intrinsic first argument and result disagree on vector use", &FPI);
if (OperandTy->isVectorTy()) {
- Check(cast<FixedVectorType>(OperandTy)->getNumElements() ==
- cast<FixedVectorType>(ResultTy)->getNumElements(),
+ Check(cast<VectorType>(OperandTy)->getElementCount() ==
+ cast<VectorType>(ResultTy)->getElementCount(),
"Intrinsic first argument and result vector lengths must be equal",
&FPI);
}
@@ -6221,7 +6532,17 @@ void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
if (!E || !E->isValid())
return;
- CheckDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
+ // We allow EntryValues for swift async arguments, as they have an
+ // ABI-guarantee to be turned into a specific register.
+ if (isa<ValueAsMetadata>(I.getRawLocation()))
+ if (auto *ArgLoc = dyn_cast_or_null<Argument>(I.getVariableLocationOp(0));
+ ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
+ return;
+
+ CheckDI(!E->isEntryValue(),
+ "Entry values are only allowed in MIR unless they target a "
+ "swiftasync Argument",
+ &I);
}
void Verifier::verifyCompileUnits() {
@@ -6680,6 +7001,9 @@ static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
}
bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
+ CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
+ &I, MD);
+
CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
isa<AtomicCmpXchgInst>(I),