aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm/include
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2019-01-20 11:41:25 +0000
committerDimitry Andric <dim@FreeBSD.org>2019-01-20 11:41:25 +0000
commitd9484dd61cc151c4f34c31e07f693fefa66316b5 (patch)
treeab0560b3da293f1fafd3269c59692e929418f5c2 /contrib/llvm/include
parent79e0962d4c3cf1f0acf359a9d69cb3ac68c414c4 (diff)
parentd8e91e46262bc44006913e6796843909f1ac7bcd (diff)
downloadsrc-d9484dd61cc151c4f34c31e07f693fefa66316b5.tar.gz
src-d9484dd61cc151c4f34c31e07f693fefa66316b5.zip
Merge llvm trunk r351319, resolve conflicts, and update FREEBSD-Xlist.
Notes
Notes: svn path=/projects/clang800-import/; revision=343210
Diffstat (limited to 'contrib/llvm/include')
-rw-r--r--contrib/llvm/include/llvm-c/Core.h371
-rw-r--r--contrib/llvm/include/llvm-c/DebugInfo.h88
-rw-r--r--contrib/llvm/include/llvm-c/Error.h69
-rw-r--r--contrib/llvm/include/llvm-c/ExecutionEngine.h2
-rw-r--r--contrib/llvm/include/llvm-c/OptRemarks.h204
-rw-r--r--contrib/llvm/include/llvm-c/OrcBindings.h73
-rw-r--r--contrib/llvm/include/llvm-c/TargetMachine.h6
-rw-r--r--contrib/llvm/include/llvm-c/Transforms/AggressiveInstCombine.h43
-rw-r--r--contrib/llvm/include/llvm-c/Transforms/Coroutines.h55
-rw-r--r--contrib/llvm/include/llvm-c/Transforms/Scalar.h9
-rw-r--r--contrib/llvm/include/llvm-c/Types.h14
-rw-r--r--contrib/llvm/include/llvm-c/lto.h12
-rw-r--r--contrib/llvm/include/llvm/ADT/APFloat.h36
-rw-r--r--contrib/llvm/include/llvm/ADT/APInt.h64
-rw-r--r--contrib/llvm/include/llvm/ADT/Any.h10
-rw-r--r--contrib/llvm/include/llvm/ADT/BitVector.h17
-rw-r--r--contrib/llvm/include/llvm/ADT/DenseMap.h81
-rw-r--r--contrib/llvm/include/llvm/ADT/DenseSet.h35
-rw-r--r--contrib/llvm/include/llvm/ADT/GraphTraits.h7
-rw-r--r--contrib/llvm/include/llvm/ADT/Hashing.h15
-rw-r--r--contrib/llvm/include/llvm/ADT/ImmutableList.h36
-rw-r--r--contrib/llvm/include/llvm/ADT/IntervalMap.h24
-rw-r--r--contrib/llvm/include/llvm/ADT/Optional.h22
-rw-r--r--contrib/llvm/include/llvm/ADT/PointerIntPair.h2
-rw-r--r--contrib/llvm/include/llvm/ADT/PointerSumType.h128
-rw-r--r--contrib/llvm/include/llvm/ADT/PostOrderIterator.h3
-rw-r--r--contrib/llvm/include/llvm/ADT/STLExtras.h358
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallBitVector.h59
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallVector.h10
-rw-r--r--contrib/llvm/include/llvm/ADT/SparseBitVector.h75
-rw-r--r--contrib/llvm/include/llvm/ADT/StringExtras.h11
-rw-r--r--contrib/llvm/include/llvm/ADT/Triple.h32
-rw-r--r--contrib/llvm/include/llvm/ADT/bit.h59
-rw-r--r--contrib/llvm/include/llvm/ADT/iterator.h38
-rw-r--r--contrib/llvm/include/llvm/Analysis/AliasAnalysis.h143
-rw-r--r--contrib/llvm/include/llvm/Analysis/AliasSetTracker.h57
-rw-r--r--contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h18
-rw-r--r--contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h2
-rw-r--r--contrib/llvm/include/llvm/Analysis/CFG.h3
-rw-r--r--contrib/llvm/include/llvm/Analysis/CFGPrinter.h5
-rw-r--r--contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h39
-rw-r--r--contrib/llvm/include/llvm/Analysis/CaptureTracking.h23
-rw-r--r--contrib/llvm/include/llvm/Analysis/CmpInstAnalysis.h17
-rw-r--r--contrib/llvm/include/llvm/Analysis/DemandedBits.h18
-rw-r--r--contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h11
-rw-r--r--contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h198
-rw-r--r--contrib/llvm/include/llvm/Analysis/GlobalsModRef.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/GuardUtils.h26
-rw-r--r--contrib/llvm/include/llvm/Analysis/IVDescriptors.h357
-rw-r--r--contrib/llvm/include/llvm/Analysis/IndirectCallSiteVisitor.h35
-rw-r--r--contrib/llvm/include/llvm/Analysis/IndirectCallVisitor.h39
-rw-r--r--contrib/llvm/include/llvm/Analysis/InlineCost.h36
-rw-r--r--contrib/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h150
-rw-r--r--contrib/llvm/include/llvm/Analysis/InstructionSimplify.h47
-rw-r--r--contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h28
-rw-r--r--contrib/llvm/include/llvm/Analysis/LegacyDivergenceAnalysis.h69
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h56
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopInfo.h26
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h9
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h11
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemoryLocation.h167
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemorySSA.h83
-rw-r--r--contrib/llvm/include/llvm/Analysis/MemorySSAUpdater.h96
-rw-r--r--contrib/llvm/include/llvm/Analysis/MustExecute.h140
-rw-r--r--contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h2
-rw-r--r--contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h38
-rw-r--r--contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h3
-rw-r--r--contrib/llvm/include/llvm/Analysis/OrderedInstructions.h (renamed from contrib/llvm/include/llvm/Transforms/Utils/OrderedInstructions.h)6
-rw-r--r--contrib/llvm/include/llvm/Analysis/Passes.h4
-rw-r--r--contrib/llvm/include/llvm/Analysis/PhiValues.h16
-rw-r--r--contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h15
-rw-r--r--contrib/llvm/include/llvm/Analysis/ScalarEvolution.h4
-rw-r--r--contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h6
-rw-r--r--contrib/llvm/include/llvm/Analysis/SparsePropagation.h18
-rw-r--r--contrib/llvm/include/llvm/Analysis/StackSafetyAnalysis.h120
-rw-r--r--contrib/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h86
-rw-r--r--contrib/llvm/include/llvm/Analysis/SyntheticCountsUtils.h15
-rw-r--r--contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def27
-rw-r--r--contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h55
-rw-r--r--contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h18
-rw-r--r--contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h8
-rw-r--r--contrib/llvm/include/llvm/Analysis/TypeMetadataUtils.h7
-rw-r--r--contrib/llvm/include/llvm/Analysis/ValueTracking.h68
-rw-r--r--contrib/llvm/include/llvm/Analysis/VectorUtils.h424
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h70
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/Dwarf.def111
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/Dwarf.h14
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/ELF.h56
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/ELFRelocs/MSP430.def16
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MachO.h5
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MsgPack.def108
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MsgPack.h93
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MsgPackReader.h148
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MsgPackTypes.h372
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/MsgPackWriter.h131
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/Wasm.h130
-rw-r--r--contrib/llvm/include/llvm/BinaryFormat/WasmRelocs.def2
-rw-r--r--contrib/llvm/include/llvm/Bitcode/BitcodeReader.h1
-rw-r--r--contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h11
-rw-r--r--contrib/llvm/include/llvm/CodeGen/AsmPrinter.h16
-rw-r--r--contrib/llvm/include/llvm/CodeGen/AsmPrinterHandler.h74
-rw-r--r--contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h253
-rw-r--r--contrib/llvm/include/llvm/CodeGen/BuiltinGCs.h (renamed from contrib/llvm/include/llvm/CodeGen/GCs.h)23
-rw-r--r--contrib/llvm/include/llvm/CodeGen/CommandFlags.inc28
-rw-r--r--contrib/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h87
-rw-r--r--contrib/llvm/include/llvm/CodeGen/DebugHandlerBase.h138
-rw-r--r--contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h47
-rw-r--r--contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GCMetadata.h9
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GCStrategy.h50
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h237
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h110
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h10
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h8
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h19
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h16
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h104
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h111
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h35
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h12
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h145
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h12
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h25
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h471
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h11
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h100
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h2
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h8
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LiveIntervals.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h17
-rw-r--r--contrib/llvm/include/llvm/CodeGen/LiveRegUnits.h8
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h7
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h42
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineFunction.h119
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineInstr.h281
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h17
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h8
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h22
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineOutliner.h78
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h80
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachinePipeliner.h608
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h19
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineScheduler.h25
-rw-r--r--contrib/llvm/include/llvm/CodeGen/Passes.h12
-rw-r--r--contrib/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h10
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h16
-rw-r--r--contrib/llvm/include/llvm/CodeGen/RegisterUsageInfo.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h23
-rw-r--r--contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h13
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAG.h119
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h11
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h12
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h141
-rw-r--r--contrib/llvm/include/llvm/CodeGen/SlotIndexes.h16
-rw-r--r--contrib/llvm/include/llvm/CodeGen/StackMaps.h44
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetFrameLowering.h7
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetInstrInfo.h67
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetLowering.h304
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h2
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetPassConfig.h34
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetRegisterInfo.h14
-rw-r--r--contrib/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h38
-rw-r--r--contrib/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h4
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/CVRecord.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h19
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewError.h35
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def577
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h12
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/RecordSerialization.h23
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h2
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDumper.h8
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecord.h31
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h62
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h9
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h42
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h28
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h9
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DIContext.h7
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h20
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h147
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h26
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h31
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h20
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h4
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h3
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h9
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h13
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h9
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFListTable.h26
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFObject.h15
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h15
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h206
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFVerifier.h59
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/MSF/MSFError.h30
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h36
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h4
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAError.h34
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h39
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h27
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h3
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/GenericError.h42
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h15
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/IPDBFrameData.h36
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h47
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h8
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h3
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h2
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h13
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h10
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h11
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h7
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h43
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h7
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h60
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h11
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h12
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h38
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSession.h30
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h51
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h50
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h (renamed from contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h)21
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h75
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h74
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h61
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h42
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h74
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h46
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h4
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawError.h34
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h1
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/SymbolCache.h148
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiHashing.h48
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h13
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h12
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h48
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h7
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h7
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h11
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h6
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h11
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h46
-rw-r--r--contrib/llvm/include/llvm/Demangle/Compiler.h93
-rw-r--r--contrib/llvm/include/llvm/Demangle/Demangle.h10
-rw-r--r--contrib/llvm/include/llvm/Demangle/ItaniumDemangle.h5184
-rw-r--r--contrib/llvm/include/llvm/Demangle/MicrosoftDemangle.h276
-rw-r--r--contrib/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h605
-rw-r--r--contrib/llvm/include/llvm/Demangle/StringView.h121
-rw-r--r--contrib/llvm/include/llvm/Demangle/Utility.h187
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h35
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/JITSymbol.h93
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h173
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h10
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/Core.h777
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h94
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h17
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h26
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h257
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h130
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h124
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/Layer.h76
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h195
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/Legacy.h82
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h4
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h14
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h75
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h83
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h3
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCUtils.h76
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h98
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h27
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h163
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h25
-rw-r--r--contrib/llvm/include/llvm/IR/Attributes.h117
-rw-r--r--contrib/llvm/include/llvm/IR/Attributes.td9
-rw-r--r--contrib/llvm/include/llvm/IR/BasicBlock.h27
-rw-r--r--contrib/llvm/include/llvm/IR/CFG.h169
-rw-r--r--contrib/llvm/include/llvm/IR/CFGDiff.h285
-rw-r--r--contrib/llvm/include/llvm/IR/CallSite.h5
-rw-r--r--contrib/llvm/include/llvm/IR/CallingConv.h3
-rw-r--r--contrib/llvm/include/llvm/IR/Constant.h3
-rw-r--r--contrib/llvm/include/llvm/IR/Constants.h13
-rw-r--r--contrib/llvm/include/llvm/IR/DIBuilder.h70
-rw-r--r--contrib/llvm/include/llvm/IR/DataLayout.h8
-rw-r--r--contrib/llvm/include/llvm/IR/DebugInfoFlags.def41
-rw-r--r--contrib/llvm/include/llvm/IR/DebugInfoMetadata.h448
-rw-r--r--contrib/llvm/include/llvm/IR/DebugLoc.h7
-rw-r--r--contrib/llvm/include/llvm/IR/DiagnosticInfo.h24
-rw-r--r--contrib/llvm/include/llvm/IR/DomTreeUpdater.h8
-rw-r--r--contrib/llvm/include/llvm/IR/Dominators.h100
-rw-r--r--contrib/llvm/include/llvm/IR/Function.h20
-rw-r--r--contrib/llvm/include/llvm/IR/GlobalValue.h1
-rw-r--r--contrib/llvm/include/llvm/IR/IRBuilder.h266
-rw-r--r--contrib/llvm/include/llvm/IR/IRPrintingPasses.h16
-rw-r--r--contrib/llvm/include/llvm/IR/InstVisitor.h72
-rw-r--r--contrib/llvm/include/llvm/IR/InstrTypes.h1162
-rw-r--r--contrib/llvm/include/llvm/IR/Instruction.def153
-rw-r--r--contrib/llvm/include/llvm/IR/Instruction.h50
-rw-r--r--contrib/llvm/include/llvm/IR/Instructions.h1279
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicInst.h42
-rw-r--r--contrib/llvm/include/llvm/IR/Intrinsics.td152
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td17
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td188
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td14893
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td6
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsRISCV.td44
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td72
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsX86.td801
-rw-r--r--contrib/llvm/include/llvm/IR/LLVMContext.h1
-rw-r--r--contrib/llvm/include/llvm/IR/LegacyPassManager.h3
-rw-r--r--contrib/llvm/include/llvm/IR/LegacyPassManagers.h17
-rw-r--r--contrib/llvm/include/llvm/IR/Metadata.h16
-rw-r--r--contrib/llvm/include/llvm/IR/Module.h43
-rw-r--r--contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h187
-rw-r--r--contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h15
-rw-r--r--contrib/llvm/include/llvm/IR/Operator.h31
-rw-r--r--contrib/llvm/include/llvm/IR/PassInstrumentation.h207
-rw-r--r--contrib/llvm/include/llvm/IR/PassManager.h133
-rw-r--r--contrib/llvm/include/llvm/IR/PassManagerInternal.h11
-rw-r--r--contrib/llvm/include/llvm/IR/PassTimingInfo.h108
-rw-r--r--contrib/llvm/include/llvm/IR/PatternMatch.h252
-rw-r--r--contrib/llvm/include/llvm/IR/RuntimeLibcalls.def8
-rw-r--r--contrib/llvm/include/llvm/IR/TypeBuilder.h407
-rw-r--r--contrib/llvm/include/llvm/IR/Value.h3
-rw-r--r--contrib/llvm/include/llvm/InitializePasses.h19
-rw-r--r--contrib/llvm/include/llvm/LTO/Config.h7
-rw-r--r--contrib/llvm/include/llvm/LTO/LTO.h24
-rw-r--r--contrib/llvm/include/llvm/LTO/SummaryBasedOptimizations.h17
-rw-r--r--contrib/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h5
-rw-r--r--contrib/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h15
-rw-r--r--contrib/llvm/include/llvm/LinkAllPasses.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCAsmInfoWasm.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCAsmMacro.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCAssembler.h11
-rw-r--r--contrib/llvm/include/llvm/MC/MCCodeView.h72
-rw-r--r--contrib/llvm/include/llvm/MC/MCContext.h4
-rw-r--r--contrib/llvm/include/llvm/MC/MCDwarf.h7
-rw-r--r--contrib/llvm/include/llvm/MC/MCELFObjectWriter.h5
-rw-r--r--contrib/llvm/include/llvm/MC/MCExpr.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCInst.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCInstrAnalysis.h70
-rw-r--r--contrib/llvm/include/llvm/MC/MCInstrDesc.h9
-rw-r--r--contrib/llvm/include/llvm/MC/MCObjectFileInfo.h33
-rw-r--r--contrib/llvm/include/llvm/MC/MCObjectStreamer.h13
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h16
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h9
-rw-r--r--contrib/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h6
-rw-r--r--contrib/llvm/include/llvm/MC/MCRegisterInfo.h6
-rw-r--r--contrib/llvm/include/llvm/MC/MCSchedule.h21
-rw-r--r--contrib/llvm/include/llvm/MC/MCSection.h7
-rw-r--r--contrib/llvm/include/llvm/MC/MCStreamer.h44
-rw-r--r--contrib/llvm/include/llvm/MC/MCSymbolWasm.h48
-rw-r--r--contrib/llvm/include/llvm/MC/MCWasmObjectWriter.h2
-rw-r--r--contrib/llvm/include/llvm/MC/MCWin64EH.h8
-rw-r--r--contrib/llvm/include/llvm/MC/MCWinEH.h9
-rw-r--r--contrib/llvm/include/llvm/MCA/Context.h69
-rw-r--r--contrib/llvm/include/llvm/MCA/HWEventListener.h156
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/HardwareUnit.h33
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h207
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/RegisterFile.h239
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h410
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/RetireControlUnit.h104
-rw-r--r--contrib/llvm/include/llvm/MCA/HardwareUnits/Scheduler.h214
-rw-r--r--contrib/llvm/include/llvm/MCA/InstrBuilder.h77
-rw-r--r--contrib/llvm/include/llvm/MCA/Instruction.h551
-rw-r--r--contrib/llvm/include/llvm/MCA/Pipeline.h79
-rw-r--r--contrib/llvm/include/llvm/MCA/SourceMgr.h57
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/DispatchStage.h93
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/EntryStage.h52
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/ExecuteStage.h80
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/InstructionTables.h46
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/RetireStage.h48
-rw-r--r--contrib/llvm/include/llvm/MCA/Stages/Stage.h88
-rw-r--r--contrib/llvm/include/llvm/MCA/Support.h119
-rw-r--r--contrib/llvm/include/llvm/Object/COFF.h12
-rw-r--r--contrib/llvm/include/llvm/Object/ELF.h8
-rw-r--r--contrib/llvm/include/llvm/Object/ELFObjectFile.h29
-rw-r--r--contrib/llvm/include/llvm/Object/ELFTypes.h25
-rw-r--r--contrib/llvm/include/llvm/Object/Error.h1
-rw-r--r--contrib/llvm/include/llvm/Object/MachO.h5
-rw-r--r--contrib/llvm/include/llvm/Object/ObjectFile.h22
-rw-r--r--contrib/llvm/include/llvm/Object/RelocVisitor.h3
-rw-r--r--contrib/llvm/include/llvm/Object/Wasm.h89
-rw-r--r--contrib/llvm/include/llvm/Object/WasmTraits.h14
-rw-r--r--contrib/llvm/include/llvm/ObjectYAML/COFFYAML.h6
-rw-r--r--contrib/llvm/include/llvm/ObjectYAML/ELFYAML.h2
-rw-r--r--contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h37
-rw-r--r--contrib/llvm/include/llvm/Option/OptTable.h8
-rw-r--r--contrib/llvm/include/llvm/Pass.h11
-rw-r--r--contrib/llvm/include/llvm/Passes/PassBuilder.h84
-rw-r--r--contrib/llvm/include/llvm/Passes/StandardInstrumentations.h70
-rw-r--r--contrib/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h13
-rw-r--r--contrib/llvm/include/llvm/ProfileData/GCOV.h20
-rw-r--r--contrib/llvm/include/llvm/ProfileData/InstrProf.h6
-rw-r--r--contrib/llvm/include/llvm/ProfileData/InstrProfReader.h30
-rw-r--r--contrib/llvm/include/llvm/ProfileData/SampleProf.h91
-rw-r--r--contrib/llvm/include/llvm/ProfileData/SampleProfReader.h80
-rw-r--r--contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h51
-rw-r--r--contrib/llvm/include/llvm/Support/AArch64TargetParser.def18
-rw-r--r--contrib/llvm/include/llvm/Support/AArch64TargetParser.h124
-rw-r--r--contrib/llvm/include/llvm/Support/AMDGPUMetadata.h15
-rw-r--r--contrib/llvm/include/llvm/Support/ARMTargetParser.def15
-rw-r--r--contrib/llvm/include/llvm/Support/ARMTargetParser.h264
-rw-r--r--contrib/llvm/include/llvm/Support/ARMWinEH.h88
-rw-r--r--contrib/llvm/include/llvm/Support/Allocator.h55
-rw-r--r--contrib/llvm/include/llvm/Support/BinaryStreamArray.h31
-rw-r--r--contrib/llvm/include/llvm/Support/BinaryStreamReader.h5
-rw-r--r--contrib/llvm/include/llvm/Support/BuryPointer.h30
-rw-r--r--contrib/llvm/include/llvm/Support/CFGUpdate.h118
-rw-r--r--contrib/llvm/include/llvm/Support/Chrono.h8
-rw-r--r--contrib/llvm/include/llvm/Support/CodeGen.h7
-rw-r--r--contrib/llvm/include/llvm/Support/CommandLine.h19
-rw-r--r--contrib/llvm/include/llvm/Support/Compiler.h19
-rw-r--r--contrib/llvm/include/llvm/Support/Compression.h13
-rw-r--r--contrib/llvm/include/llvm/Support/Debug.h4
-rw-r--r--contrib/llvm/include/llvm/Support/DebugCounter.h2
-rw-r--r--contrib/llvm/include/llvm/Support/Error.h105
-rw-r--r--contrib/llvm/include/llvm/Support/ErrorHandling.h4
-rw-r--r--contrib/llvm/include/llvm/Support/FileCheck.h282
-rw-r--r--contrib/llvm/include/llvm/Support/FileOutputBuffer.h4
-rw-r--r--contrib/llvm/include/llvm/Support/FileSystem.h143
-rw-r--r--contrib/llvm/include/llvm/Support/FormatVariadicDetails.h2
-rw-r--r--contrib/llvm/include/llvm/Support/GenericDomTree.h58
-rw-r--r--contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h106
-rw-r--r--contrib/llvm/include/llvm/Support/GraphWriter.h23
-rw-r--r--contrib/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h93
-rw-r--r--contrib/llvm/include/llvm/Support/JSON.h9
-rw-r--r--contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h9
-rw-r--r--contrib/llvm/include/llvm/Support/MSVCErrorWorkarounds.h84
-rw-r--r--contrib/llvm/include/llvm/Support/Path.h16
-rw-r--r--contrib/llvm/include/llvm/Support/ScopedPrinter.h2
-rw-r--r--contrib/llvm/include/llvm/Support/SymbolRemappingReader.h133
-rw-r--r--contrib/llvm/include/llvm/Support/TargetOpcodes.def54
-rw-r--r--contrib/llvm/include/llvm/Support/TargetParser.h293
-rw-r--r--contrib/llvm/include/llvm/Support/Threading.h3
-rw-r--r--contrib/llvm/include/llvm/Support/Timer.h14
-rw-r--r--contrib/llvm/include/llvm/Support/VirtualFileSystem.h764
-rw-r--r--contrib/llvm/include/llvm/Support/Win64EH.h19
-rw-r--r--contrib/llvm/include/llvm/Support/WithColor.h63
-rw-r--r--contrib/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h2
-rw-r--r--contrib/llvm/include/llvm/Support/X86TargetParser.def46
-rw-r--r--contrib/llvm/include/llvm/Support/YAMLTraits.h326
-rw-r--r--contrib/llvm/include/llvm/Support/raw_ostream.h12
-rw-r--r--contrib/llvm/include/llvm/Support/type_traits.h5
-rw-r--r--contrib/llvm/include/llvm/TableGen/StringMatcher.h7
-rw-r--r--contrib/llvm/include/llvm/Target/CodeGenCWrappers.h4
-rw-r--r--contrib/llvm/include/llvm/Target/GenericOpcodes.td119
-rw-r--r--contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td7
-rw-r--r--contrib/llvm/include/llvm/Target/Target.td13
-rw-r--r--contrib/llvm/include/llvm/Target/TargetInstrPredicate.td242
-rw-r--r--contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h22
-rw-r--r--contrib/llvm/include/llvm/Target/TargetMachine.h61
-rw-r--r--contrib/llvm/include/llvm/Target/TargetOptions.h12
-rw-r--r--contrib/llvm/include/llvm/Target/TargetPfmCounters.td50
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSchedule.td66
-rw-r--r--contrib/llvm/include/llvm/Target/TargetSelectionDAG.td60
-rw-r--r--contrib/llvm/include/llvm/Testing/Support/SupportHelpers.h9
-rw-r--r--contrib/llvm/include/llvm/TextAPI/ELF/ELFStub.h69
-rw-r--r--contrib/llvm/include/llvm/TextAPI/ELF/TBEHandler.h45
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO.h5
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h3
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h64
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h31
-rw-r--r--contrib/llvm/include/llvm/Transforms/IPO/SampleProfile.h7
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation.h33
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation/ControlHeightReduction.h31
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h48
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h4
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h33
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar.h20
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/ConstantHoisting.h67
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/GVN.h17
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/JumpThreading.h38
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h27
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h74
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/MakeGuardsExplicit.h47
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/SCCP.h14
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/Scalarizer.h35
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h38
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils.h7
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h67
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h13
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/CanonicalizeAliases.h32
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/Cloning.h66
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h23
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/FunctionImportUtils.h3
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/GuardUtils.h30
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/Local.h74
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h5
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h450
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h18
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/PredicateInfo.h2
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h25
-rw-r--r--contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h23
-rw-r--r--contrib/llvm/include/llvm/Transforms/Vectorize.h4
-rw-r--r--contrib/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h27
-rw-r--r--contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h21
-rw-r--r--contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h11
-rw-r--r--contrib/llvm/include/llvm/XRay/BlockIndexer.h69
-rw-r--r--contrib/llvm/include/llvm/XRay/BlockPrinter.h62
-rw-r--r--contrib/llvm/include/llvm/XRay/BlockVerifier.h72
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRLogBuilder.h41
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRRecordConsumer.h55
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRRecordProducer.h51
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRRecords.h450
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRTraceExpander.h63
-rw-r--r--contrib/llvm/include/llvm/XRay/FDRTraceWriter.h56
-rw-r--r--contrib/llvm/include/llvm/XRay/FileHeaderReader.h33
-rw-r--r--contrib/llvm/include/llvm/XRay/Profile.h150
-rw-r--r--contrib/llvm/include/llvm/XRay/RecordPrinter.h50
-rw-r--r--contrib/llvm/include/llvm/XRay/Trace.h22
-rw-r--r--contrib/llvm/include/llvm/XRay/XRayRecord.h23
-rw-r--r--contrib/llvm/include/llvm/XRay/YAMLXRayRecord.h9
-rw-r--r--contrib/llvm/include/llvm/module.extern.modulemap5
-rw-r--r--contrib/llvm/include/llvm/module.install.modulemap27
-rw-r--r--contrib/llvm/include/llvm/module.modulemap82
561 files changed, 38237 insertions, 17725 deletions
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index 6792219f8730..06de058bdc58 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -54,6 +54,8 @@ extern "C" {
* @{
*/
+/// External users depend on the following values being stable. It is not safe
+/// to reorder them.
typedef enum {
/* Terminator Instructions */
LLVMRet = 1,
@@ -64,6 +66,9 @@ typedef enum {
/* removed 6 due to API changes */
LLVMUnreachable = 7,
+ /* Standard Unary Operators */
+ LLVMFNeg = 66,
+
/* Standard Binary Operators */
LLVMAdd = 8,
LLVMFAdd = 9,
@@ -516,6 +521,23 @@ void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
void *OpaqueHandle);
/**
+ * Retrieve whether the given context is set to discard all value names.
+ *
+ * @see LLVMContext::shouldDiscardValueNames()
+ */
+LLVMBool LLVMContextShouldDiscardValueNames(LLVMContextRef C);
+
+/**
+ * Set whether the given context discards all value names.
+ *
+ * If true, only the names of GlobalValue objects will be available in the IR.
+ * This can be used to save memory and runtime, especially in release mode.
+ *
+ * @see LLVMContext::setDiscardValueNames()
+ */
+void LLVMContextSetDiscardValueNames(LLVMContextRef C, LLVMBool Discard);
+
+/**
* Destroy a context instance.
*
* This should be called for every call to LLVMContextCreate() or memory
@@ -843,6 +865,63 @@ LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M);
LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
/**
+ * Obtain an iterator to the first NamedMDNode in a Module.
+ *
+ * @see llvm::Module::named_metadata_begin()
+ */
+LLVMNamedMDNodeRef LLVMGetFirstNamedMetadata(LLVMModuleRef M);
+
+/**
+ * Obtain an iterator to the last NamedMDNode in a Module.
+ *
+ * @see llvm::Module::named_metadata_end()
+ */
+LLVMNamedMDNodeRef LLVMGetLastNamedMetadata(LLVMModuleRef M);
+
+/**
+ * Advance a NamedMDNode iterator to the next NamedMDNode.
+ *
+ * Returns NULL if the iterator was already at the end and there are no more
+ * named metadata nodes.
+ */
+LLVMNamedMDNodeRef LLVMGetNextNamedMetadata(LLVMNamedMDNodeRef NamedMDNode);
+
+/**
+ * Decrement a NamedMDNode iterator to the previous NamedMDNode.
+ *
+ * Returns NULL if the iterator was already at the beginning and there are
+ * no previous named metadata nodes.
+ */
+LLVMNamedMDNodeRef LLVMGetPreviousNamedMetadata(LLVMNamedMDNodeRef NamedMDNode);
+
+/**
+ * Retrieve a NamedMDNode with the given name, returning NULL if no such
+ * node exists.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ */
+LLVMNamedMDNodeRef LLVMGetNamedMetadata(LLVMModuleRef M,
+ const char *Name, size_t NameLen);
+
+/**
+ * Retrieve a NamedMDNode with the given name, creating a new node if no such
+ * node exists.
+ *
+ * @see llvm::Module::getOrInsertNamedMetadata()
+ */
+LLVMNamedMDNodeRef LLVMGetOrInsertNamedMetadata(LLVMModuleRef M,
+ const char *Name,
+ size_t NameLen);
+
+/**
+ * Retrieve the name of a NamedMDNode.
+ *
+ * @see llvm::NamedMDNode::getName()
+ */
+const char *LLVMGetNamedMetadataName(LLVMNamedMDNodeRef NamedMD,
+ size_t *NameLen);
+
+/**
* Obtain the number of operands for named metadata in a module.
*
* @see llvm::Module::getNamedMetadata()
@@ -873,6 +952,44 @@ void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char *Name,
LLVMValueRef Val);
/**
+ * Return the directory of the debug location for this value, which must be
+ * an llvm::Instruction, llvm::GlobalVariable, or llvm::Function.
+ *
+ * @see llvm::Instruction::getDebugLoc()
+ * @see llvm::GlobalVariable::getDebugInfo()
+ * @see llvm::Function::getSubprogram()
+ */
+const char *LLVMGetDebugLocDirectory(LLVMValueRef Val, unsigned *Length);
+
+/**
+ * Return the filename of the debug location for this value, which must be
+ * an llvm::Instruction, llvm::GlobalVariable, or llvm::Function.
+ *
+ * @see llvm::Instruction::getDebugLoc()
+ * @see llvm::GlobalVariable::getDebugInfo()
+ * @see llvm::Function::getSubprogram()
+ */
+const char *LLVMGetDebugLocFilename(LLVMValueRef Val, unsigned *Length);
+
+/**
+ * Return the line number of the debug location for this value, which must be
+ * an llvm::Instruction, llvm::GlobalVariable, or llvm::Function.
+ *
+ * @see llvm::Instruction::getDebugLoc()
+ * @see llvm::GlobalVariable::getDebugInfo()
+ * @see llvm::Function::getSubprogram()
+ */
+unsigned LLVMGetDebugLocLine(LLVMValueRef Val);
+
+/**
+ * Return the column number of the debug location for this value, which must be
+ * an llvm::Instruction.
+ *
+ * @see llvm::Instruction::getDebugLoc()
+ */
+unsigned LLVMGetDebugLocColumn(LLVMValueRef Val);
+
+/**
* Add a function to a module under a specified name.
*
* @see llvm::Function::Create()
@@ -1222,6 +1339,13 @@ LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy);
LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy);
/**
+ * Determine whether a structure is literal.
+ *
+ * @see llvm::StructType::isLiteral()
+ */
+LLVMBool LLVMIsLiteralStruct(LLVMTypeRef StructTy);
+
+/**
* @}
*/
@@ -1408,6 +1532,7 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(ConstantVector) \
macro(GlobalValue) \
macro(GlobalAlias) \
+ macro(GlobalIFunc) \
macro(GlobalObject) \
macro(Function) \
macro(GlobalVariable) \
@@ -1417,7 +1542,9 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(CallInst) \
macro(IntrinsicInst) \
macro(DbgInfoIntrinsic) \
- macro(DbgDeclareInst) \
+ macro(DbgVariableIntrinsic) \
+ macro(DbgDeclareInst) \
+ macro(DbgLabelInst) \
macro(MemIntrinsic) \
macro(MemCpyInst) \
macro(MemMoveInst) \
@@ -1434,16 +1561,15 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(SelectInst) \
macro(ShuffleVectorInst) \
macro(StoreInst) \
- macro(TerminatorInst) \
- macro(BranchInst) \
- macro(IndirectBrInst) \
- macro(InvokeInst) \
- macro(ReturnInst) \
- macro(SwitchInst) \
- macro(UnreachableInst) \
- macro(ResumeInst) \
- macro(CleanupReturnInst) \
- macro(CatchReturnInst) \
+ macro(BranchInst) \
+ macro(IndirectBrInst) \
+ macro(InvokeInst) \
+ macro(ReturnInst) \
+ macro(SwitchInst) \
+ macro(UnreachableInst) \
+ macro(ResumeInst) \
+ macro(CleanupReturnInst) \
+ macro(CatchReturnInst) \
macro(FuncletPadInst) \
macro(CatchPadInst) \
macro(CleanupPadInst) \
@@ -1959,9 +2085,14 @@ LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices, unsigned NumIndices);
+LLVMValueRef LLVMConstGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices, unsigned NumIndices);
LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices,
unsigned NumIndices);
+LLVMValueRef LLVMConstInBoundsGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices);
LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
@@ -2037,6 +2168,14 @@ void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class);
LLVMUnnamedAddr LLVMGetUnnamedAddress(LLVMValueRef Global);
void LLVMSetUnnamedAddress(LLVMValueRef Global, LLVMUnnamedAddr UnnamedAddr);
+/**
+ * Returns the "value type" of a global value. This differs from the formal
+ * type of a global value which is always a pointer type.
+ *
+ * @see llvm::GlobalValue::getValueType()
+ */
+LLVMTypeRef LLVMGlobalGetValueType(LLVMValueRef Global);
+
/** Deprecated: Use LLVMGetUnnamedAddress instead. */
LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global);
/** Deprecated: Use LLVMSetUnnamedAddress instead. */
@@ -2068,6 +2207,58 @@ unsigned LLVMGetAlignment(LLVMValueRef V);
void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes);
/**
+ * Sets a metadata attachment, erasing the existing metadata attachment if
+ * it already exists for the given kind.
+ *
+ * @see llvm::GlobalObject::setMetadata()
+ */
+void LLVMGlobalSetMetadata(LLVMValueRef Global, unsigned Kind,
+ LLVMMetadataRef MD);
+
+/**
+ * Erases a metadata attachment of the given kind if it exists.
+ *
+ * @see llvm::GlobalObject::eraseMetadata()
+ */
+void LLVMGlobalEraseMetadata(LLVMValueRef Global, unsigned Kind);
+
+/**
+ * Removes all metadata attachments from this value.
+ *
+ * @see llvm::GlobalObject::clearMetadata()
+ */
+void LLVMGlobalClearMetadata(LLVMValueRef Global);
+
+/**
+ * Retrieves an array of metadata entries representing the metadata attached to
+ * this value. The caller is responsible for freeing this array by calling
+ * \c LLVMDisposeValueMetadataEntries.
+ *
+ * @see llvm::GlobalObject::getAllMetadata()
+ */
+LLVMValueMetadataEntry *LLVMGlobalCopyAllMetadata(LLVMValueRef Value,
+ size_t *NumEntries);
+
+/**
+ * Destroys value metadata entries.
+ */
+void LLVMDisposeValueMetadataEntries(LLVMValueMetadataEntry *Entries);
+
+/**
+ * Returns the kind of a value metadata entry at a specific index.
+ */
+unsigned LLVMValueMetadataEntriesGetKind(LLVMValueMetadataEntry *Entries,
+ unsigned Index);
+
+/**
+ * Returns the underlying metadata node of a value metadata entry at a
+ * specific index.
+ */
+LLVMMetadataRef
+LLVMValueMetadataEntriesGetMetadata(LLVMValueMetadataEntry *Entries,
+ unsigned Index);
+
+/**
* @}
*/
@@ -2218,6 +2409,54 @@ void LLVMSetPersonalityFn(LLVMValueRef Fn, LLVMValueRef PersonalityFn);
unsigned LLVMGetIntrinsicID(LLVMValueRef Fn);
/**
+ * Create or insert the declaration of an intrinsic. For overloaded intrinsics,
+ * parameter types must be provided to uniquely identify an overload.
+ *
+ * @see llvm::Intrinsic::getDeclaration()
+ */
+LLVMValueRef LLVMGetIntrinsicDeclaration(LLVMModuleRef Mod,
+ unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount);
+
+/**
+ * Retrieves the type of an intrinsic. For overloaded intrinsics, parameter
+ * types must be provided to uniquely identify an overload.
+ *
+ * @see llvm::Intrinsic::getType()
+ */
+LLVMTypeRef LLVMIntrinsicGetType(LLVMContextRef Ctx, unsigned ID,
+ LLVMTypeRef *ParamTypes, size_t ParamCount);
+
+/**
+ * Retrieves the name of an intrinsic.
+ *
+ * @see llvm::Intrinsic::getName()
+ */
+const char *LLVMIntrinsicGetName(unsigned ID, size_t *NameLength);
+
+/**
+ * Copies the name of an overloaded intrinsic identified by a given list of
+ * parameter types.
+ *
+ * Unlike LLVMIntrinsicGetName, the caller is responsible for freeing the
+ * returned string.
+ *
+ * @see llvm::Intrinsic::getName()
+ */
+const char *LLVMIntrinsicCopyOverloadedName(unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount,
+ size_t *NameLength);
+
+/**
+ * Obtain if the intrinsic identified by the given ID is overloaded.
+ *
+ * @see llvm::Intrinsic::isOverloaded()
+ */
+LLVMBool LLVMIntrinsicIsOverloaded(unsigned ID);
+
+/**
* Obtain the calling function of a function.
*
* The returned value corresponds to the LLVMCallConv enumeration.
@@ -2514,7 +2753,7 @@ LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB);
* If the basic block does not have a terminator (it is not well-formed
* if it doesn't), then NULL is returned.
*
- * The returned LLVMValueRef corresponds to a llvm::TerminatorInst.
+ * The returned LLVMValueRef corresponds to an llvm::Instruction.
*
* @see llvm::BasicBlock::getTerminator()
*/
@@ -2573,6 +2812,14 @@ LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB);
LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn);
/**
+ * Create a new basic block without inserting it into a function.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMCreateBasicBlockInContext(LLVMContextRef C,
+ const char *Name);
+
+/**
* Append a basic block to the end of a function.
*
* @see llvm::BasicBlock::Create()
@@ -2695,6 +2942,16 @@ LLVMValueRef LLVMGetMetadata(LLVMValueRef Val, unsigned KindID);
void LLVMSetMetadata(LLVMValueRef Val, unsigned KindID, LLVMValueRef Node);
/**
+ * Returns the metadata associated with an instruction value, but filters out
+ * all the debug locations.
+ *
+ * @see llvm::Instruction::getAllMetadataOtherThanDebugLoc()
+ */
+LLVMValueMetadataEntry *
+LLVMInstructionGetAllMetadataOtherThanDebugLoc(LLVMValueRef Instr,
+ size_t *NumEntries);
+
+/**
* Obtain the basic block to which an instruction belongs.
*
* @see llvm::Instruction::getParent()
@@ -2777,6 +3034,15 @@ LLVMRealPredicate LLVMGetFCmpPredicate(LLVMValueRef Inst);
LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst);
/**
+ * Determine whether an instruction is a terminator. This routine is named to
+ * be compatible with historical functions that did this by querying the
+ * underlying C++ type.
+ *
+ * @see llvm::Instruction::isTerminator()
+ */
+LLVMValueRef LLVMIsATerminatorInst(LLVMValueRef Inst);
+
+/**
* @defgroup LLVMCCoreValueInstructionCall Call Sites and Invocations
*
* Functions in this group apply to instructions that refer to call
@@ -2839,6 +3105,13 @@ void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
const char *K, unsigned KLen);
/**
+ * Obtain the function type called by this instruction.
+ *
+ * @see llvm::CallBase::getFunctionType()
+ */
+LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef C);
+
+/**
* Obtain the pointer to the function invoked by this instruction.
*
* This expects an LLVMValueRef that corresponds to a llvm::CallInst or
@@ -2916,8 +3189,8 @@ void LLVMSetUnwindDest(LLVMValueRef InvokeInst, LLVMBasicBlockRef B);
/**
* @defgroup LLVMCCoreValueInstructionTerminator Terminators
*
- * Functions in this group only apply to instructions that map to
- * llvm::TerminatorInst instances.
+ * Functions in this group only apply to instructions for which
+ * LLVMIsATerminatorInst returns true.
*
* @{
*/
@@ -2925,21 +3198,21 @@ void LLVMSetUnwindDest(LLVMValueRef InvokeInst, LLVMBasicBlockRef B);
/**
* Return the number of successors that this terminator has.
*
- * @see llvm::TerminatorInst::getNumSuccessors
+ * @see llvm::Instruction::getNumSuccessors
*/
unsigned LLVMGetNumSuccessors(LLVMValueRef Term);
/**
* Return the specified successor.
*
- * @see llvm::TerminatorInst::getSuccessor
+ * @see llvm::Instruction::getSuccessor
*/
LLVMBasicBlockRef LLVMGetSuccessor(LLVMValueRef Term, unsigned i);
/**
* Update the specified successor to point at the provided block.
*
- * @see llvm::TerminatorInst::setSuccessor
+ * @see llvm::Instruction::setSuccessor
*/
void LLVMSetSuccessor(LLVMValueRef Term, unsigned i, LLVMBasicBlockRef block);
@@ -3130,10 +3403,16 @@ LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef, LLVMValueRef V,
LLVMBasicBlockRef Else, unsigned NumCases);
LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
unsigned NumDests);
+// LLVMBuildInvoke is deprecated in favor of LLVMBuildInvoke2, in preparation
+// for opaque pointer types.
LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
const char *Name);
+LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name);
LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef);
/* Exception Handling */
@@ -3290,13 +3569,48 @@ LLVMValueRef LLVMBuildNot(LLVMBuilderRef, LLVMValueRef V, const char *Name);
LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty,
LLVMValueRef Val, const char *Name);
+
+/**
+ * Creates and inserts a memset to the specified pointer and the
+ * specified value.
+ *
+ * @see llvm::IRRBuilder::CreateMemSet()
+ */
+LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
+ LLVMValueRef Val, LLVMValueRef Len,
+ unsigned Align);
+/**
+ * Creates and inserts a memcpy between the specified pointers.
+ *
+ * @see llvm::IRRBuilder::CreateMemCpy()
+ */
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size);
+/**
+ * Creates and inserts a memmove between the specified pointers.
+ *
+ * @see llvm::IRRBuilder::CreateMemMove()
+ */
+LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size);
+
LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef, LLVMTypeRef Ty,
LLVMValueRef Val, const char *Name);
LLVMValueRef LLVMBuildFree(LLVMBuilderRef, LLVMValueRef PointerVal);
+// LLVMBuildLoad is deprecated in favor of LLVMBuildLoad2, in preparation for
+// opaque pointer types.
LLVMValueRef LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal,
const char *Name);
+LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef, LLVMTypeRef Ty,
+ LLVMValueRef PointerVal, const char *Name);
LLVMValueRef LLVMBuildStore(LLVMBuilderRef, LLVMValueRef Val, LLVMValueRef Ptr);
+// LLVMBuildGEP, LLVMBuildInBoundsGEP, and LLVMBuildStructGEP are deprecated in
+// favor of LLVMBuild*GEP2, in preparation for opaque pointer types.
LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name);
@@ -3305,6 +3619,15 @@ LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
const char *Name);
LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
unsigned Idx, const char *Name);
+LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name);
+LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name);
+LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, unsigned Idx,
+ const char *Name);
LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
const char *Name);
LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
@@ -3351,11 +3674,16 @@ LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val,
LLVMTypeRef DestTy, const char *Name);
LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef, LLVMValueRef Val,
LLVMTypeRef DestTy, const char *Name);
-LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef, LLVMValueRef Val, /*Signed cast!*/
- LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildIntCast2(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, LLVMBool IsSigned,
+ const char *Name);
LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef, LLVMValueRef Val,
LLVMTypeRef DestTy, const char *Name);
+/** Deprecated: This cast is always signed. Use LLVMBuildIntCast2 instead. */
+LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef, LLVMValueRef Val, /*Signed cast!*/
+ LLVMTypeRef DestTy, const char *Name);
+
/* Comparisons */
LLVMValueRef LLVMBuildICmp(LLVMBuilderRef, LLVMIntPredicate Op,
LLVMValueRef LHS, LLVMValueRef RHS,
@@ -3366,9 +3694,14 @@ LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef, LLVMRealPredicate Op,
/* Miscellaneous instructions */
LLVMValueRef LLVMBuildPhi(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
+// LLVMBuildCall is deprecated in favor of LLVMBuildCall2, in preparation for
+// opaque pointer types.
LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name);
+LLVMValueRef LLVMBuildCall2(LLVMBuilderRef, LLVMTypeRef, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name);
LLVMValueRef LLVMBuildSelect(LLVMBuilderRef, LLVMValueRef If,
LLVMValueRef Then, LLVMValueRef Else,
const char *Name);
diff --git a/contrib/llvm/include/llvm-c/DebugInfo.h b/contrib/llvm/include/llvm-c/DebugInfo.h
index cee6755f1874..87a72034b0e8 100644
--- a/contrib/llvm/include/llvm-c/DebugInfo.h
+++ b/contrib/llvm/include/llvm-c/DebugInfo.h
@@ -54,9 +54,12 @@ typedef enum {
LLVMDIFlagMainSubprogram = 1 << 21,
LLVMDIFlagTypePassByValue = 1 << 22,
LLVMDIFlagTypePassByReference = 1 << 23,
- LLVMDIFlagFixedEnum = 1 << 24,
+ LLVMDIFlagEnumClass = 1 << 24,
+ LLVMDIFlagFixedEnum = LLVMDIFlagEnumClass, // Deprecated.
LLVMDIFlagThunk = 1 << 25,
LLVMDIFlagTrivial = 1 << 26,
+ LLVMDIFlagBigEndian = 1 << 27,
+ LLVMDIFlagLittleEndian = 1 << 28,
LLVMDIFlagIndirectVirtualBase = (1 << 2) | (1 << 5),
LLVMDIFlagAccessibility = LLVMDIFlagPrivate | LLVMDIFlagProtected |
LLVMDIFlagPublic,
@@ -125,6 +128,44 @@ typedef enum {
} LLVMDWARFEmissionKind;
/**
+ * The kind of metadata nodes.
+ */
+enum {
+ LLVMMDStringMetadataKind,
+ LLVMConstantAsMetadataMetadataKind,
+ LLVMLocalAsMetadataMetadataKind,
+ LLVMDistinctMDOperandPlaceholderMetadataKind,
+ LLVMMDTupleMetadataKind,
+ LLVMDILocationMetadataKind,
+ LLVMDIExpressionMetadataKind,
+ LLVMDIGlobalVariableExpressionMetadataKind,
+ LLVMGenericDINodeMetadataKind,
+ LLVMDISubrangeMetadataKind,
+ LLVMDIEnumeratorMetadataKind,
+ LLVMDIBasicTypeMetadataKind,
+ LLVMDIDerivedTypeMetadataKind,
+ LLVMDICompositeTypeMetadataKind,
+ LLVMDISubroutineTypeMetadataKind,
+ LLVMDIFileMetadataKind,
+ LLVMDICompileUnitMetadataKind,
+ LLVMDISubprogramMetadataKind,
+ LLVMDILexicalBlockMetadataKind,
+ LLVMDILexicalBlockFileMetadataKind,
+ LLVMDINamespaceMetadataKind,
+ LLVMDIModuleMetadataKind,
+ LLVMDITemplateTypeParameterMetadataKind,
+ LLVMDITemplateValueParameterMetadataKind,
+ LLVMDIGlobalVariableMetadataKind,
+ LLVMDILocalVariableMetadataKind,
+ LLVMDILabelMetadataKind,
+ LLVMDIObjCPropertyMetadataKind,
+ LLVMDIImportedEntityMetadataKind,
+ LLVMDIMacroMetadataKind,
+ LLVMDIMacroFileMetadataKind
+};
+typedef unsigned LLVMMetadataKind;
+
+/**
* An LLVM DWARF type encoding.
*/
typedef unsigned LLVMDWARFTypeEncoding;
@@ -531,11 +572,13 @@ LLVMDIBuilderCreateUnspecifiedType(LLVMDIBuilderRef Builder, const char *Name,
* \param NameLen Length of type name.
* \param SizeInBits Size of the type.
* \param Encoding DWARF encoding code, e.g. \c LLVMDWARFTypeEncoding_float.
+ * \param Flags Flags to encode optional attribute like endianity
*/
LLVMMetadataRef
LLVMDIBuilderCreateBasicType(LLVMDIBuilderRef Builder, const char *Name,
size_t NameLen, uint64_t SizeInBits,
- LLVMDWARFTypeEncoding Encoding);
+ LLVMDWARFTypeEncoding Encoding,
+ LLVMDIFlags Flags);
/**
* Create debugging information entry for a pointer.
@@ -965,21 +1008,15 @@ LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
* \param Expr The location of the global relative to the attached
* GlobalVariable.
* \param Decl Reference to the corresponding declaration.
+ * variables.
* \param AlignInBits Variable alignment(or 0 if no alignment attr was
* specified)
*/
-LLVMMetadataRef
-LLVMDIBuilderCreateGlobalVariableExpression(LLVMDIBuilderRef Builder,
- LLVMMetadataRef Scope,
- const char *Name, size_t NameLen,
- const char *Linkage, size_t LinkLen,
- LLVMMetadataRef File,
- unsigned LineNo,
- LLVMMetadataRef Ty,
- LLVMBool LocalToUnit,
- LLVMMetadataRef Expr,
- LLVMMetadataRef Decl,
- uint32_t AlignInBits);
+LLVMMetadataRef LLVMDIBuilderCreateGlobalVariableExpression(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LinkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Expr, LLVMMetadataRef Decl, uint32_t AlignInBits);
/**
* Create a new temporary \c MDNode. Suitable for use in constructing cyclic
* \c MDNode structures. A temporary \c MDNode is not uniqued, may be RAUW'd,
@@ -1025,17 +1062,11 @@ void LLVMMetadataReplaceAllUsesWith(LLVMMetadataRef TempTargetMetadata,
* \param AlignInBits Variable alignment(or 0 if no alignment attr was
* specified)
*/
-LLVMMetadataRef
-LLVMDIBuilderCreateTempGlobalVariableFwdDecl(LLVMDIBuilderRef Builder,
- LLVMMetadataRef Scope,
- const char *Name, size_t NameLen,
- const char *Linkage, size_t LnkLen,
- LLVMMetadataRef File,
- unsigned LineNo,
- LLVMMetadataRef Ty,
- LLVMBool LocalToUnit,
- LLVMMetadataRef Decl,
- uint32_t AlignInBits);
+LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LnkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Decl, uint32_t AlignInBits);
/**
* Insert a new llvm.dbg.declare intrinsic call before the given instruction.
@@ -1149,6 +1180,13 @@ LLVMMetadataRef LLVMGetSubprogram(LLVMValueRef Func);
*/
void LLVMSetSubprogram(LLVMValueRef Func, LLVMMetadataRef SP);
+/**
+ * Obtain the enumerated type of a Metadata instance.
+ *
+ * @see llvm::Metadata::getMetadataID()
+ */
+LLVMMetadataKind LLVMGetMetadataKind(LLVMMetadataRef Metadata);
+
#ifdef __cplusplus
} /* end extern "C" */
#endif
diff --git a/contrib/llvm/include/llvm-c/Error.h b/contrib/llvm/include/llvm-c/Error.h
new file mode 100644
index 000000000000..71e84661222b
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Error.h
@@ -0,0 +1,69 @@
+/*===------- llvm-c/Error.h - llvm::Error class C Interface -------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to LLVM's Error class. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_ERROR_H
+#define LLVM_C_ERROR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LLVMErrorSuccess 0
+
+/**
+ * Opaque reference to an error instance. Null serves as the 'success' value.
+ */
+typedef struct LLVMOpaqueError *LLVMErrorRef;
+
+/**
+ * Error type identifier.
+ */
+typedef const void *LLVMErrorTypeId;
+
+/**
+ * Returns the type id for the given error instance, which must be a failure
+ * value (i.e. non-null).
+ */
+LLVMErrorTypeId LLVMGetErrorTypeId(LLVMErrorRef Err);
+
+/**
+ * Dispose of the given error without handling it. This operation consumes the
+ * error, and the given LLVMErrorRef value is not usable once this call returns.
+ * Note: This method *only* needs to be called if the error is not being passed
+ * to some other consuming operation, e.g. LLVMGetErrorMessage.
+ */
+void LLVMConsumeError(LLVMErrorRef Err);
+
+/**
+ * Returns the given string's error message. This operation consumes the error,
+ * and the given LLVMErrorRef value is not usable once this call returns.
+ * The caller is responsible for disposing of the string by calling
+ * LLVMDisposeErrorMessage.
+ */
+char *LLVMGetErrorMessage(LLVMErrorRef Err);
+
+/**
+ * Dispose of the given error message.
+ */
+void LLVMDisposeErrorMessage(char *ErrMsg);
+
+/**
+ * Returns the type id for llvm StringError.
+ */
+LLVMErrorTypeId LLVMGetStringErrorTypeId();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/ExecutionEngine.h b/contrib/llvm/include/llvm-c/ExecutionEngine.h
index 49ae6fee45f0..e8ebef9ab15d 100644
--- a/contrib/llvm/include/llvm-c/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm-c/ExecutionEngine.h
@@ -186,7 +186,7 @@ void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM);
LLVMJITEventListenerRef LLVMCreateGDBRegistrationListener(void);
LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void);
-LLVMJITEventListenerRef LLVMCreateOprofileJITEventListener(void);
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void);
LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void);
/**
diff --git a/contrib/llvm/include/llvm-c/OptRemarks.h b/contrib/llvm/include/llvm-c/OptRemarks.h
new file mode 100644
index 000000000000..6a90394e711c
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/OptRemarks.h
@@ -0,0 +1,204 @@
+/*===-- llvm-c/OptRemarks.h - OptRemarks Public C Interface -------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides a public interface to an opt-remark library. *|
+|* LLVM provides an implementation of this interface. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_OPT_REMARKS_H
+#define LLVM_C_OPT_REMARKS_H
+
+#include "llvm-c/Core.h"
+#include "llvm-c/Types.h"
+#ifdef __cplusplus
+#include <cstddef>
+extern "C" {
+#else
+#include <stddef.h>
+#endif /* !defined(__cplusplus) */
+
+/**
+ * @defgroup LLVMCOPTREMARKS OptRemarks
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+#define OPT_REMARKS_API_VERSION 0
+
+/**
+ * String containing a buffer and a length. The buffer is not guaranteed to be
+ * zero-terminated.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+typedef struct {
+ const char *Str;
+ uint32_t Len;
+} LLVMOptRemarkStringRef;
+
+/**
+ * DebugLoc containing File, Line and Column.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+typedef struct {
+ // File:
+ LLVMOptRemarkStringRef SourceFile;
+ // Line:
+ uint32_t SourceLineNumber;
+ // Column:
+ uint32_t SourceColumnNumber;
+} LLVMOptRemarkDebugLoc;
+
+/**
+ * Element of the "Args" list. The key might give more information about what
+ * are the semantics of the value, e.g. "Callee" will tell you that the value
+ * is a symbol that names a function.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+typedef struct {
+ // e.g. "Callee"
+ LLVMOptRemarkStringRef Key;
+ // e.g. "malloc"
+ LLVMOptRemarkStringRef Value;
+
+ // "DebugLoc": Optional
+ LLVMOptRemarkDebugLoc DebugLoc;
+} LLVMOptRemarkArg;
+
+/**
+ * One remark entry.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+typedef struct {
+ // e.g. !Missed, !Passed
+ LLVMOptRemarkStringRef RemarkType;
+ // "Pass": Required
+ LLVMOptRemarkStringRef PassName;
+ // "Name": Required
+ LLVMOptRemarkStringRef RemarkName;
+ // "Function": Required
+ LLVMOptRemarkStringRef FunctionName;
+
+ // "DebugLoc": Optional
+ LLVMOptRemarkDebugLoc DebugLoc;
+ // "Hotness": Optional
+ uint32_t Hotness;
+ // "Args": Optional. It is an array of `num_args` elements.
+ uint32_t NumArgs;
+ LLVMOptRemarkArg *Args;
+} LLVMOptRemarkEntry;
+
+typedef struct LLVMOptRemarkOpaqueParser *LLVMOptRemarkParserRef;
+
+/**
+ * Creates a remark parser that can be used to read and parse the buffer located
+ * in \p Buf of size \p Size.
+ *
+ * \p Buf cannot be NULL.
+ *
+ * This function should be paired with LLVMOptRemarkParserDispose() to avoid
+ * leaking resources.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern LLVMOptRemarkParserRef LLVMOptRemarkParserCreate(const void *Buf,
+ uint64_t Size);
+
+/**
+ * Returns the next remark in the file.
+ *
+ * The value pointed to by the return value is invalidated by the next call to
+ * LLVMOptRemarkParserGetNext().
+ *
+ * If the parser reaches the end of the buffer, the return value will be NULL.
+ *
+ * In the case of an error, the return value will be NULL, and:
+ *
+ * 1) LLVMOptRemarkParserHasError() will return `1`.
+ *
+ * 2) LLVMOptRemarkParserGetErrorMessage() will return a descriptive error
+ * message.
+ *
+ * An error may occur if:
+ *
+ * 1) An argument is invalid.
+ *
+ * 2) There is a YAML parsing error. This type of error aborts parsing
+ * immediately and returns `1`. It can occur on malformed YAML.
+ *
+ * 3) Remark parsing error. If this type of error occurs, the parser won't call
+ * the handler and will continue to the next one. It can occur on malformed
+ * remarks, like missing or extra fields in the file.
+ *
+ * Here is a quick example of the usage:
+ *
+ * ```
+ * LLVMOptRemarkParserRef Parser = LLVMOptRemarkParserCreate(Buf, Size);
+ * LLVMOptRemarkEntry *Remark = NULL;
+ * while ((Remark == LLVMOptRemarkParserGetNext(Parser))) {
+ * // use Remark
+ * }
+ * bool HasError = LLVMOptRemarkParserHasError(Parser);
+ * LLVMOptRemarkParserDispose(Parser);
+ * ```
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern LLVMOptRemarkEntry *
+LLVMOptRemarkParserGetNext(LLVMOptRemarkParserRef Parser);
+
+/**
+ * Returns `1` if the parser encountered an error while parsing the buffer.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern LLVMBool LLVMOptRemarkParserHasError(LLVMOptRemarkParserRef Parser);
+
+/**
+ * Returns a null-terminated string containing an error message.
+ *
+ * In case of no error, the result is `NULL`.
+ *
+ * The memory of the string is bound to the lifetime of \p Parser. If
+ * LLVMOptRemarkParserDispose() is called, the memory of the string will be
+ * released.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern const char *
+LLVMOptRemarkParserGetErrorMessage(LLVMOptRemarkParserRef Parser);
+
+/**
+ * Releases all the resources used by \p Parser.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern void LLVMOptRemarkParserDispose(LLVMOptRemarkParserRef Parser);
+
+/**
+ * Returns the version of the opt-remarks dylib.
+ *
+ * \since OPT_REMARKS_API_VERSION=0
+ */
+extern uint32_t LLVMOptRemarkVersion(void);
+
+/**
+ * @} // endgoup LLVMCOPTREMARKS
+ */
+
+#ifdef __cplusplus
+}
+#endif /* !defined(__cplusplus) */
+
+#endif /* LLVM_C_OPT_REMARKS_H */
diff --git a/contrib/llvm/include/llvm-c/OrcBindings.h b/contrib/llvm/include/llvm-c/OrcBindings.h
index 9497f0d40776..570db87fee94 100644
--- a/contrib/llvm/include/llvm-c/OrcBindings.h
+++ b/contrib/llvm/include/llvm-c/OrcBindings.h
@@ -22,6 +22,7 @@
#ifndef LLVM_C_ORCBINDINGS_H
#define LLVM_C_ORCBINDINGS_H
+#include "llvm-c/Error.h"
#include "llvm-c/Object.h"
#include "llvm-c/TargetMachine.h"
@@ -36,8 +37,6 @@ typedef uint64_t (*LLVMOrcSymbolResolverFn)(const char *Name, void *LookupCtx);
typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
void *CallbackCtx);
-typedef enum { LLVMOrcErrSuccess = 0, LLVMOrcErrGeneric } LLVMOrcErrorCode;
-
/**
* Create an ORC JIT stack.
*
@@ -72,43 +71,41 @@ void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
/**
* Create a lazy compile callback.
*/
-LLVMOrcErrorCode
-LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
- LLVMOrcTargetAddress *RetAddr,
- LLVMOrcLazyCompileCallbackFn Callback,
- void *CallbackCtx);
+LLVMErrorRef LLVMOrcCreateLazyCompileCallback(
+ LLVMOrcJITStackRef JITStack, LLVMOrcTargetAddress *RetAddr,
+ LLVMOrcLazyCompileCallbackFn Callback, void *CallbackCtx);
/**
* Create a named indirect call stub.
*/
-LLVMOrcErrorCode LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
- const char *StubName,
- LLVMOrcTargetAddress InitAddr);
+LLVMErrorRef LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress InitAddr);
/**
* Set the pointer for the given indirect stub.
*/
-LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
- const char *StubName,
- LLVMOrcTargetAddress NewAddr);
+LLVMErrorRef LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress NewAddr);
/**
* Add module to be eagerly compiled.
*/
-LLVMOrcErrorCode
-LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
- LLVMOrcModuleHandle *RetHandle, LLVMModuleRef Mod,
- LLVMOrcSymbolResolverFn SymbolResolver,
- void *SymbolResolverCtx);
+LLVMErrorRef LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
/**
* Add module to be lazily compiled one function at a time.
*/
-LLVMOrcErrorCode
-LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
- LLVMOrcModuleHandle *RetHandle, LLVMModuleRef Mod,
- LLVMOrcSymbolResolverFn SymbolResolver,
- void *SymbolResolverCtx);
+LLVMErrorRef LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
/**
* Add an object file.
@@ -118,11 +115,11 @@ LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
* Clients should *not* dispose of the 'Obj' argument: the JIT will manage it
* from this call onwards.
*/
-LLVMOrcErrorCode LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
- LLVMOrcModuleHandle *RetHandle,
- LLVMMemoryBufferRef Obj,
- LLVMOrcSymbolResolverFn SymbolResolver,
- void *SymbolResolverCtx);
+LLVMErrorRef LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMMemoryBufferRef Obj,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
/**
* Remove a module set from the JIT.
@@ -130,29 +127,29 @@ LLVMOrcErrorCode LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
* This works for all modules that can be added via OrcAdd*, including object
* files.
*/
-LLVMOrcErrorCode LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
- LLVMOrcModuleHandle H);
+LLVMErrorRef LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle H);
/**
* Get symbol address from JIT instance.
*/
-LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
- LLVMOrcTargetAddress *RetAddr,
- const char *SymbolName);
+LLVMErrorRef LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ const char *SymbolName);
/**
* Get symbol address from JIT instance, searching only the specified
* handle.
*/
-LLVMOrcErrorCode LLVMOrcGetSymbolAddressIn(LLVMOrcJITStackRef JITStack,
- LLVMOrcTargetAddress *RetAddr,
- LLVMOrcModuleHandle H,
- const char *SymbolName);
+LLVMErrorRef LLVMOrcGetSymbolAddressIn(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ LLVMOrcModuleHandle H,
+ const char *SymbolName);
/**
* Dispose of an ORC JIT stack.
*/
-LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
+LLVMErrorRef LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
/**
* Register a JIT Event Listener.
diff --git a/contrib/llvm/include/llvm-c/TargetMachine.h b/contrib/llvm/include/llvm-c/TargetMachine.h
index 7f672b5d10d6..c06e9edc9aaf 100644
--- a/contrib/llvm/include/llvm-c/TargetMachine.h
+++ b/contrib/llvm/include/llvm-c/TargetMachine.h
@@ -39,12 +39,16 @@ typedef enum {
LLVMRelocDefault,
LLVMRelocStatic,
LLVMRelocPIC,
- LLVMRelocDynamicNoPic
+ LLVMRelocDynamicNoPic,
+ LLVMRelocROPI,
+ LLVMRelocRWPI,
+ LLVMRelocROPI_RWPI
} LLVMRelocMode;
typedef enum {
LLVMCodeModelDefault,
LLVMCodeModelJITDefault,
+ LLVMCodeModelTiny,
LLVMCodeModelSmall,
LLVMCodeModelKernel,
LLVMCodeModelMedium,
diff --git a/contrib/llvm/include/llvm-c/Transforms/AggressiveInstCombine.h b/contrib/llvm/include/llvm-c/Transforms/AggressiveInstCombine.h
new file mode 100644
index 000000000000..8756a22e917a
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/AggressiveInstCombine.h
@@ -0,0 +1,43 @@
+/*===-- AggressiveInstCombine.h ---------------------------------*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMAggressiveInstCombine.a, *|
+|* which combines instructions to form fewer, simple IR instructions. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_AGGRESSIVEINSTCOMBINE_H
+#define LLVM_C_TRANSFORMS_AGGRESSIVEINSTCOMBINE_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsAggressiveInstCombine Aggressive Instruction Combining transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createAggressiveInstCombinerPass function. */
+void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
+
diff --git a/contrib/llvm/include/llvm-c/Transforms/Coroutines.h b/contrib/llvm/include/llvm-c/Transforms/Coroutines.h
new file mode 100644
index 000000000000..827e30fb2d7c
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/Coroutines.h
@@ -0,0 +1,55 @@
+/*===-- Coroutines.h - Coroutines Library C Interface -----------*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMCoroutines.a, which *|
+|* implements various scalar transformations of the LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_COROUTINES_H
+#define LLVM_C_TRANSFORMS_COROUTINES_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsCoroutines Coroutine transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createCoroEarlyPass function. */
+void LLVMAddCoroEarlyPass(LLVMPassManagerRef PM);
+
+/** See llvm::createCoroSplitPass function. */
+void LLVMAddCoroSplitPass(LLVMPassManagerRef PM);
+
+/** See llvm::createCoroElidePass function. */
+void LLVMAddCoroElidePass(LLVMPassManagerRef PM);
+
+/** See llvm::createCoroCleanupPass function. */
+void LLVMAddCoroCleanupPass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/Transforms/Scalar.h b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
index f55cdce86be9..3c3bb4eb9b82 100644
--- a/contrib/llvm/include/llvm-c/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
@@ -35,9 +35,6 @@ extern "C" {
/** See llvm::createAggressiveDCEPass function. */
void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM);
-/** See llvm::createAggressiveInstCombinerPass function. */
-void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM);
-
/** See llvm::createBitTrackingDCEPass function. */
void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM);
@@ -95,6 +92,9 @@ void LLVMAddLoopUnrollAndJamPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnswitchPass function. */
void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM);
+/** See llvm::createLowerAtomicPass function. */
+void LLVMAddLowerAtomicPass(LLVMPassManagerRef PM);
+
/** See llvm::createMemCpyOptPass function. */
void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM);
@@ -153,6 +153,9 @@ void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM);
/** See llvm::createBasicAliasAnalysisPass function */
void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM);
+/** See llvm::createUnifyFunctionExitNodesPass function */
+void LLVMAddUnifyFunctionExitNodesPass(LLVMPassManagerRef PM);
+
/**
* @}
*/
diff --git a/contrib/llvm/include/llvm-c/Types.h b/contrib/llvm/include/llvm-c/Types.h
index 4a33542e86cc..ce1acf3e0421 100644
--- a/contrib/llvm/include/llvm-c/Types.h
+++ b/contrib/llvm/include/llvm-c/Types.h
@@ -90,6 +90,20 @@ typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
typedef struct LLVMOpaqueMetadata *LLVMMetadataRef;
/**
+ * Represents an LLVM Named Metadata Node.
+ *
+ * This models llvm::NamedMDNode.
+ */
+typedef struct LLVMOpaqueNamedMDNode *LLVMNamedMDNodeRef;
+
+/**
+ * Represents an entry in a Global Object's metadata attachments.
+ *
+ * This models std::pair<unsigned, MDNode *>
+ */
+typedef struct LLVMOpaqueValueMetadataEntry LLVMValueMetadataEntry;
+
+/**
* Represents an LLVM basic block builder.
*
* This models llvm::IRBuilder.
diff --git a/contrib/llvm/include/llvm-c/lto.h b/contrib/llvm/include/llvm-c/lto.h
index 1acd610f70ac..090cd34af4e9 100644
--- a/contrib/llvm/include/llvm-c/lto.h
+++ b/contrib/llvm/include/llvm-c/lto.h
@@ -44,7 +44,7 @@ typedef bool lto_bool_t;
* @{
*/
-#define LTO_API_VERSION 22
+#define LTO_API_VERSION 23
/**
* \since prior to LTO_API_VERSION=3
@@ -828,6 +828,16 @@ extern void thinlto_codegen_set_cache_size_bytes(thinlto_code_gen_t cg,
unsigned max_size_bytes);
/**
+ * Same as thinlto_codegen_set_cache_size_bytes, except the maximum size is in
+ * megabytes (2^20 bytes).
+ *
+ * \since LTO_API_VERSION=23
+ */
+extern void
+thinlto_codegen_set_cache_size_megabytes(thinlto_code_gen_t cg,
+ unsigned max_size_megabytes);
+
+/**
* Sets the maximum number of files in the cache directory. An unspecified
* default value will be applied. A value of 0 will be ignored.
*
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index 5c59af4c04ba..c6fa5ad674f6 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -870,13 +870,13 @@ public:
/// Factory for NaN values.
///
/// \param Negative - True iff the NaN generated should be negative.
- /// \param type - The unspecified fill bits for creating the NaN, 0 by
+ /// \param payload - The unspecified fill bits for creating the NaN, 0 by
/// default. The value is truncated as necessary.
static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
- unsigned type = 0) {
- if (type) {
- APInt fill(64, type);
- return getQNaN(Sem, Negative, &fill);
+ uint64_t payload = 0) {
+ if (payload) {
+ APInt intPayload(64, payload);
+ return getQNaN(Sem, Negative, &intPayload);
} else {
return getQNaN(Sem, Negative, nullptr);
}
@@ -1243,6 +1243,32 @@ inline APFloat maxnum(const APFloat &A, const APFloat &B) {
return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
}
+/// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2
+/// arguments, propagating NaNs and treating -0 as less than +0.
+LLVM_READONLY
+inline APFloat minimum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return A;
+ if (B.isNaN())
+ return B;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? A : B;
+ return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+}
+
+/// Implements IEEE 754-2018 maximum semantics. Returns the larger of 2
+/// arguments, propagating NaNs and treating -0 as less than +0.
+LLVM_READONLY
+inline APFloat maximum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return A;
+ if (B.isNaN())
+ return B;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? B : A;
+ return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+}
+
} // namespace llvm
#undef APFLOAT_DISPATCH_ON_SEMANTICS
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index 6bf6b22fb010..6e106ff8bf5d 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -31,6 +31,7 @@ class raw_ostream;
template <typename T> class SmallVectorImpl;
template <typename T> class ArrayRef;
+template <typename T> class Optional;
class APInt;
@@ -84,7 +85,7 @@ public:
UP,
};
- static const WordType WORD_MAX = ~WordType(0);
+ static const WordType WORDTYPE_MAX = ~WordType(0);
private:
/// This union is used to store the integer value. When the
@@ -149,7 +150,7 @@ private:
unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;
// Mask out the high bits.
- uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - WordBits);
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
if (isSingleWord())
U.VAL &= mask;
else
@@ -394,7 +395,7 @@ public:
/// This checks to see if the value has all bits of the APInt are set or not.
bool isAllOnesValue() const {
if (isSingleWord())
- return U.VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
+ return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
return countTrailingOnesSlowCase() == BitWidth;
}
@@ -495,7 +496,7 @@ public:
assert(numBits != 0 && "numBits must be non-zero");
assert(numBits <= BitWidth && "numBits out of range");
if (isSingleWord())
- return U.VAL == (WORD_MAX >> (APINT_BITS_PER_WORD - numBits));
+ return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
unsigned Ones = countTrailingOnesSlowCase();
return (numBits == Ones) &&
((Ones + countLeadingZerosSlowCase()) == BitWidth);
@@ -559,7 +560,7 @@ public:
///
/// \returns the all-ones value for an APInt of the specified bit-width.
static APInt getAllOnesValue(unsigned numBits) {
- return APInt(numBits, WORD_MAX, true);
+ return APInt(numBits, WORDTYPE_MAX, true);
}
/// Get the '0' value.
@@ -1104,6 +1105,12 @@ public:
APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
+ // Operations that saturate
+ APInt sadd_sat(const APInt &RHS) const;
+ APInt uadd_sat(const APInt &RHS) const;
+ APInt ssub_sat(const APInt &RHS) const;
+ APInt usub_sat(const APInt &RHS) const;
+
/// Array-indexing support.
///
/// \returns the bit value at bitPosition
@@ -1382,7 +1389,7 @@ public:
/// Set every bit to 1.
void setAllBits() {
if (isSingleWord())
- U.VAL = WORD_MAX;
+ U.VAL = WORDTYPE_MAX;
else
// Set all the bits in all the words.
memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
@@ -1394,7 +1401,7 @@ public:
///
/// Set the given bit to 1 whose position is given as "bitPosition".
void setBit(unsigned BitPosition) {
- assert(BitPosition <= BitWidth && "BitPosition out of range");
+ assert(BitPosition < BitWidth && "BitPosition out of range");
WordType Mask = maskBit(BitPosition);
if (isSingleWord())
U.VAL |= Mask;
@@ -1415,7 +1422,7 @@ public:
if (loBit == hiBit)
return;
if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
- uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
mask <<= loBit;
if (isSingleWord())
U.VAL |= mask;
@@ -1453,7 +1460,7 @@ public:
///
/// Set the given bit to 0 whose position is given as "bitPosition".
void clearBit(unsigned BitPosition) {
- assert(BitPosition <= BitWidth && "BitPosition out of range");
+ assert(BitPosition < BitWidth && "BitPosition out of range");
WordType Mask = ~maskBit(BitPosition);
if (isSingleWord())
U.VAL &= Mask;
@@ -1469,7 +1476,7 @@ public:
/// Toggle every bit to its opposite value.
void flipAllBits() {
if (isSingleWord()) {
- U.VAL ^= WORD_MAX;
+ U.VAL ^= WORDTYPE_MAX;
clearUnusedBits();
} else {
flipAllBitsSlowCase();
@@ -1758,7 +1765,7 @@ public:
/// referencing 2 in a space where 2 does no exist.
unsigned nearestLogBase2() const {
// Special case when we have a bitwidth of 1. If VAL is 1, then we
- // get 0. If VAL is 0, we get WORD_MAX which gets truncated to
+ // get 0. If VAL is 0, we get WORDTYPE_MAX which gets truncated to
// UINT32_MAX.
if (BitWidth == 1)
return U.VAL - 1;
@@ -2166,6 +2173,41 @@ APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
/// Return A sign-divided by B, rounded by the given rounding mode.
APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
+/// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range
+/// (e.g. 32 for i32).
+/// This function finds the smallest number n, such that
+/// (a) n >= 0 and q(n) = 0, or
+/// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all
+/// integers, belong to two different intervals [Rk, Rk+R),
+/// where R = 2^BW, and k is an integer.
+/// The idea here is to find when q(n) "overflows" 2^BW, while at the
+/// same time "allowing" subtraction. In unsigned modulo arithmetic a
+/// subtraction (treated as addition of negated numbers) would always
+/// count as an overflow, but here we want to allow values to decrease
+/// and increase as long as they are within the same interval.
+/// Specifically, adding of two negative numbers should not cause an
+/// overflow (as long as the magnitude does not exceed the bith width).
+/// On the other hand, given a positive number, adding a negative
+/// number to it can give a negative result, which would cause the
+/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
+/// treated as a special case of an overflow.
+///
+/// This function returns None if after finding k that minimizes the
+/// positive solution to q(n) = kR, both solutions are contained between
+/// two consecutive integers.
+///
+/// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation
+/// in arithmetic modulo 2^BW, and treating the values as signed) by the
+/// virtue of *signed* overflow. This function will *not* find such an n,
+/// however it may find a value of n satisfying the inequalities due to
+/// an *unsigned* overflow (if the values are treated as unsigned).
+/// To find a solution for a signed overflow, treat it as a problem of
+/// finding an unsigned overflow with a range with of BW-1.
+///
+/// The returned value may have a different bit width from the input
+/// coefficients.
+Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
+ unsigned RangeWidth);
} // End of APIntOps namespace
// See friend declaration above. This additional declaration is required in
diff --git a/contrib/llvm/include/llvm/ADT/Any.h b/contrib/llvm/include/llvm/ADT/Any.h
index c64c39987542..7faa4c963d3d 100644
--- a/contrib/llvm/include/llvm/ADT/Any.h
+++ b/contrib/llvm/include/llvm/ADT/Any.h
@@ -65,6 +65,16 @@ public:
typename std::enable_if<
llvm::conjunction<
llvm::negation<std::is_same<typename std::decay<T>::type, Any>>,
+ // We also disable this overload when an `Any` object can be
+ // converted to the parameter type because in that case, this
+ // constructor may combine with that conversion during overload
+ // resolution for determining copy constructibility, and then
+ // when we try to determine copy constructibility below we may
+ // infinitely recurse. This is being evaluated by the standards
+ // committee as a potential DR in `std::any` as well, but we're
+ // going ahead and adopting it to work-around usage of `Any` with
+ // types that need to be implicitly convertible from an `Any`.
+ llvm::negation<std::is_convertible<Any, typename std::decay<T>::type>>,
std::is_copy_constructible<typename std::decay<T>::type>>::value,
int>::type = 0>
Any(T &&Value) {
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 438c7d84c581..9ab1da7c6913 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -503,6 +503,23 @@ public:
return (*this)[Idx];
}
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ unsigned OldSize = Size;
+ unsigned NewSize = Size + 1;
+
+ // Resize, which will insert zeros.
+ // If we already fit then the unused bits will be already zero.
+ if (NewSize > getBitCapacity())
+ resize(NewSize, false);
+ else
+ Size = NewSize;
+
+ // If true, set single bit.
+ if (Val)
+ set(OldSize);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index ba60b7972a8f..1f50502fff92 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -25,6 +25,7 @@
#include <cassert>
#include <cstddef>
#include <cstring>
+#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
@@ -38,6 +39,34 @@ namespace detail {
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
+
+ // FIXME: Switch to inheriting constructors when we drop support for older
+ // clang versions.
+ // NOTE: This default constructor is declared with '{}' rather than
+ // '= default' to work around a separate bug in clang-3.8. This can
+ // also go when we switch to inheriting constructors.
+ DenseMapPair() {}
+
+ DenseMapPair(const KeyT &Key, const ValueT &Value)
+ : std::pair<KeyT, ValueT>(Key, Value) {}
+
+ DenseMapPair(KeyT &&Key, ValueT &&Value)
+ : std::pair<KeyT, ValueT>(std::move(Key), std::move(Value)) {}
+
+ template <typename AltKeyT, typename AltValueT>
+ DenseMapPair(AltKeyT &&AltKey, AltValueT &&AltValue,
+ typename std::enable_if<
+ std::is_convertible<AltKeyT, KeyT>::value &&
+ std::is_convertible<AltValueT, ValueT>::value>::type * = 0)
+ : std::pair<KeyT, ValueT>(std::forward<AltKeyT>(AltKey),
+ std::forward<AltValueT>(AltValue)) {}
+
+ template <typename AltPairT>
+ DenseMapPair(AltPairT &&AltPair,
+ typename std::enable_if<std::is_convertible<
+ AltPairT, std::pair<KeyT, ValueT>>::value>::type * = 0)
+ : std::pair<KeyT, ValueT>(std::forward<AltPairT>(AltPair)) {}
+
KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
@@ -46,9 +75,10 @@ struct DenseMapPair : public std::pair<KeyT, ValueT> {
} // end namespace detail
-template <
- typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
- typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
+ bool IsConst = false>
class DenseMapIterator;
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
@@ -393,7 +423,7 @@ protected:
setNumTombstones(other.getNumTombstones());
if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
- memcpy(getBuckets(), other.getBuckets(),
+ memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
getNumBuckets() * sizeof(BucketT));
else
for (size_t i = 0; i < getNumBuckets(); ++i) {
@@ -639,9 +669,43 @@ public:
}
};
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator==(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &KV : LHS) {
+ auto I = RHS.find(KV.first);
+ if (I == RHS.end() || I->second != KV.second)
+ return false;
+ }
+
+ return true;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator!=(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ return !(LHS == RHS);
+}
+
template <typename KeyT, typename ValueT,
typename KeyInfoT = DenseMapInfo<KeyT>,
- typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+ typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
KeyT, ValueT, KeyInfoT, BucketT> {
friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
@@ -676,6 +740,11 @@ public:
this->insert(I, E);
}
+ DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
+ init(Vals.size());
+ this->insert(Vals.begin(), Vals.end());
+ }
+
~DenseMap() {
this->destroyAll();
operator delete(Buckets);
@@ -798,7 +867,7 @@ private:
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
typename KeyInfoT = DenseMapInfo<KeyT>,
- typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+ typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
: public DenseMapBase<
SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
diff --git a/contrib/llvm/include/llvm/ADT/DenseSet.h b/contrib/llvm/include/llvm/ADT/DenseSet.h
index b495e25dd5e5..e85a38587e41 100644
--- a/contrib/llvm/include/llvm/ADT/DenseSet.h
+++ b/contrib/llvm/include/llvm/ADT/DenseSet.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cstddef>
@@ -67,7 +68,7 @@ public:
explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
DenseSetImpl(std::initializer_list<ValueT> Elems)
- : DenseSetImpl(Elems.size()) {
+ : DenseSetImpl(PowerOf2Ceil(Elems.size())) {
insert(Elems.begin(), Elems.end());
}
@@ -136,8 +137,8 @@ public:
public:
using difference_type = typename MapTy::const_iterator::difference_type;
using value_type = ValueT;
- using pointer = value_type *;
- using reference = value_type &;
+ using pointer = const value_type *;
+ using reference = const value_type &;
using iterator_category = std::forward_iterator_tag;
ConstIterator() = default;
@@ -214,6 +215,34 @@ public:
}
};
+/// Equality comparison for DenseSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst
+/// case is O(N^2) (if every hash collides).
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator==(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &E : LHS)
+ if (!RHS.count(E))
+ return false;
+
+ return true;
+}
+
+/// Inequality comparison for DenseSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator!=(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ return !(LHS == RHS);
+}
+
} // end namespace detail
/// Implements a dense probed hash-table based set.
diff --git a/contrib/llvm/include/llvm/ADT/GraphTraits.h b/contrib/llvm/include/llvm/ADT/GraphTraits.h
index 27c647f4bbbd..d39b50fdc488 100644
--- a/contrib/llvm/include/llvm/ADT/GraphTraits.h
+++ b/contrib/llvm/include/llvm/ADT/GraphTraits.h
@@ -25,6 +25,13 @@ namespace llvm {
// GraphTraits - This class should be specialized by different graph types...
// which is why the default version is empty.
//
+// This template evolved from supporting `BasicBlock` to also later supporting
+// more complex types (e.g. CFG and DomTree).
+//
+// GraphTraits can be used to create a view over a graph interpreting it
+// differently without requiring a copy of the original graph. This could
+// be achieved by carrying more data in NodeRef. See LoopBodyTraits for one
+// example.
template<class GraphType>
struct GraphTraits {
// Elements to provide:
diff --git a/contrib/llvm/include/llvm/ADT/Hashing.h b/contrib/llvm/include/llvm/ADT/Hashing.h
index 9f830baa4243..9175c545b7c9 100644
--- a/contrib/llvm/include/llvm/ADT/Hashing.h
+++ b/contrib/llvm/include/llvm/ADT/Hashing.h
@@ -133,7 +133,7 @@ hash_code hash_value(const std::basic_string<T> &arg);
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
-void set_fixed_execution_hash_seed(size_t fixed_value);
+void set_fixed_execution_hash_seed(uint64_t fixed_value);
// All of the implementation details of actually computing the various hash
@@ -316,9 +316,9 @@ struct hash_state {
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
-extern size_t fixed_seed_override;
+extern uint64_t fixed_seed_override;
-inline size_t get_execution_seed() {
+inline uint64_t get_execution_seed() {
// FIXME: This needs to be a per-execution seed. This is just a placeholder
// implementation. Switching to a per-execution seed is likely to flush out
// instability bugs and so will happen as its own commit.
@@ -326,8 +326,7 @@ inline size_t get_execution_seed() {
// However, if there is a fixed seed override set the first time this is
// called, return that instead of the per-execution seed.
const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
- static size_t seed = fixed_seed_override ? fixed_seed_override
- : (size_t)seed_prime;
+ static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
return seed;
}
@@ -402,7 +401,7 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
- const size_t seed = get_execution_seed();
+ const uint64_t seed = get_execution_seed();
char buffer[64], *buffer_ptr = buffer;
char *const buffer_end = std::end(buffer);
while (first != last && store_and_advance(buffer_ptr, buffer_end,
@@ -446,7 +445,7 @@ hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
template <typename ValueT>
typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
- const size_t seed = get_execution_seed();
+ const uint64_t seed = get_execution_seed();
const char *s_begin = reinterpret_cast<const char *>(first);
const char *s_end = reinterpret_cast<const char *>(last);
const size_t length = std::distance(s_begin, s_end);
@@ -496,7 +495,7 @@ namespace detail {
struct hash_combine_recursive_helper {
char buffer[64];
hash_state state;
- const size_t seed;
+ const uint64_t seed;
public:
/// Construct a recursive hash combining helper.
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableList.h b/contrib/llvm/include/llvm/ADT/ImmutableList.h
index 1f5e9813798d..0541dc2566ed 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableList.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableList.h
@@ -31,8 +31,9 @@ class ImmutableListImpl : public FoldingSetNode {
T Head;
const ImmutableListImpl* Tail;
- ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
- : Head(head), Tail(tail) {}
+ template <typename ElemT>
+ ImmutableListImpl(ElemT &&head, const ImmutableListImpl *tail = nullptr)
+ : Head(std::forward<ElemT>(head)), Tail(tail) {}
public:
ImmutableListImpl(const ImmutableListImpl &) = delete;
@@ -66,6 +67,9 @@ public:
using value_type = T;
using Factory = ImmutableListFactory<T>;
+ static_assert(std::is_trivially_destructible<T>::value,
+ "T must be trivially destructible!");
+
private:
const ImmutableListImpl<T>* X;
@@ -90,6 +94,9 @@ public:
bool operator==(const iterator& I) const { return L == I.L; }
bool operator!=(const iterator& I) const { return L != I.L; }
const value_type& operator*() const { return L->getHead(); }
+ const typename std::remove_reference<value_type>::type* operator->() const {
+ return &L->getHead();
+ }
ImmutableList getList() const { return L; }
};
@@ -123,14 +130,14 @@ public:
bool operator==(const ImmutableList& L) const { return isEqual(L); }
/// getHead - Returns the head of the list.
- const T& getHead() {
+ const T& getHead() const {
assert(!isEmpty() && "Cannot get the head of an empty list.");
return X->getHead();
}
/// getTail - Returns the tail of the list, which is another (possibly empty)
/// ImmutableList.
- ImmutableList getTail() {
+ ImmutableList getTail() const {
return X ? X->getTail() : nullptr;
}
@@ -166,7 +173,8 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
- LLVM_NODISCARD ImmutableList<T> concat(const T &Head, ImmutableList<T> Tail) {
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> concat(ElemT &&Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
@@ -179,7 +187,7 @@ public:
// The list does not exist in our cache. Create it.
BumpPtrAllocator& A = getAllocator();
L = (ListTy*) A.Allocate<ListTy>();
- new (L) ListTy(Head, TailImpl);
+ new (L) ListTy(std::forward<ElemT>(Head), TailImpl);
// Insert the new list into the cache.
Cache.InsertNode(L, InsertPos);
@@ -188,16 +196,24 @@ public:
return L;
}
- LLVM_NODISCARD ImmutableList<T> add(const T& D, ImmutableList<T> L) {
- return concat(D, L);
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> add(ElemT &&Data, ImmutableList<T> L) {
+ return concat(std::forward<ElemT>(Data), L);
+ }
+
+ template <typename ...CtorArgs>
+ LLVM_NODISCARD ImmutableList<T> emplace(ImmutableList<T> Tail,
+ CtorArgs &&...Args) {
+ return concat(T(std::forward<CtorArgs>(Args)...), Tail);
}
ImmutableList<T> getEmptyList() const {
return ImmutableList<T>(nullptr);
}
- ImmutableList<T> create(const T& X) {
- return Concat(X, getEmptyList());
+ template <typename ElemT>
+ ImmutableList<T> create(ElemT &&Data) {
+ return concat(std::forward<ElemT>(Data), getEmptyList());
}
};
diff --git a/contrib/llvm/include/llvm/ADT/IntervalMap.h b/contrib/llvm/include/llvm/ADT/IntervalMap.h
index f71366811218..2af61049e5af 100644
--- a/contrib/llvm/include/llvm/ADT/IntervalMap.h
+++ b/contrib/llvm/include/llvm/ADT/IntervalMap.h
@@ -101,6 +101,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/bit.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
@@ -963,6 +964,7 @@ public:
private:
// The root data is either a RootLeaf or a RootBranchData instance.
+ LLVM_ALIGNAS(RootLeaf) LLVM_ALIGNAS(RootBranchData)
AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
// Tree height.
@@ -977,15 +979,10 @@ private:
// Allocator used for creating external nodes.
Allocator &allocator;
- /// dataAs - Represent data as a node type without breaking aliasing rules.
+ /// Represent data as a node type without breaking aliasing rules.
template <typename T>
T &dataAs() const {
- union {
- const char *d;
- T *t;
- } u;
- u.d = data.buffer;
- return *u.t;
+ return *bit_cast<T *>(const_cast<char *>(data.buffer));
}
const RootLeaf &rootLeaf() const {
@@ -1137,6 +1134,19 @@ public:
I.find(x);
return I;
}
+
+ /// overlaps(a, b) - Return true if the intervals in this map overlap with the
+ /// interval [a;b].
+ bool overlaps(KeyT a, KeyT b) {
+ assert(Traits::nonEmpty(a, b));
+ const_iterator I = find(a);
+ if (!I.valid())
+ return false;
+ // [a;b] and [x;y] overlap iff x<=b and a<=y. The find() call guarantees the
+ // second part (y = find(a).stop()), so it is sufficient to check the first
+ // one.
+ return !Traits::stopLess(b, I.start());
+ }
};
/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
diff --git a/contrib/llvm/include/llvm/ADT/Optional.h b/contrib/llvm/include/llvm/ADT/Optional.h
index 353e5d0ec9df..76937d632ae1 100644
--- a/contrib/llvm/include/llvm/ADT/Optional.h
+++ b/contrib/llvm/include/llvm/ADT/Optional.h
@@ -29,7 +29,7 @@ namespace llvm {
namespace optional_detail {
/// Storage for any type.
-template <typename T, bool IsPodLike> struct OptionalStorage {
+template <typename T, bool = isPodLike<T>::value> struct OptionalStorage {
AlignedCharArrayUnion<T> storage;
bool hasVal = false;
@@ -108,28 +108,10 @@ template <typename T, bool IsPodLike> struct OptionalStorage {
}
};
-#if !defined(__GNUC__) || defined(__clang__) // GCC up to GCC7 miscompiles this.
-/// Storage for trivially copyable types only.
-template <typename T> struct OptionalStorage<T, true> {
- AlignedCharArrayUnion<T> storage;
- bool hasVal = false;
-
- OptionalStorage() = default;
-
- OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
- OptionalStorage &operator=(const T &y) {
- *reinterpret_cast<T *>(storage.buffer) = y;
- hasVal = true;
- return *this;
- }
-
- void reset() { hasVal = false; }
-};
-#endif
} // namespace optional_detail
template <typename T> class Optional {
- optional_detail::OptionalStorage<T, isPodLike<T>::value> Storage;
+ optional_detail::OptionalStorage<T> Storage;
public:
using value_type = T;
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
index 884d05155bff..6d1b53a90ad2 100644
--- a/contrib/llvm/include/llvm/ADT/PointerIntPair.h
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -42,6 +42,8 @@ template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
class PointerIntPair {
+ // Used by MSVC visualizer and generally helpful for debugging/visualizing.
+ using InfoTy = Info;
intptr_t Value = 0;
public:
diff --git a/contrib/llvm/include/llvm/ADT/PointerSumType.h b/contrib/llvm/include/llvm/ADT/PointerSumType.h
index e37957160d98..a19e45a46218 100644
--- a/contrib/llvm/include/llvm/ADT/PointerSumType.h
+++ b/contrib/llvm/include/llvm/ADT/PointerSumType.h
@@ -10,6 +10,7 @@
#ifndef LLVM_ADT_POINTERSUMTYPE_H
#define LLVM_ADT_POINTERSUMTYPE_H
+#include "llvm/ADT/bit.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
@@ -58,56 +59,142 @@ template <typename TagT, typename... MemberTs> struct PointerSumTypeHelper;
/// and may be desirable to set to a state that is particularly desirable to
/// default construct.
///
+/// Having a supported zero-valued tag also enables getting the address of a
+/// pointer stored with that tag provided it is stored in its natural bit
+/// representation. This works because in the case of a zero-valued tag, the
+/// pointer's value is directly stored into this object and we can expose the
+/// address of that internal storage. This is especially useful when building an
+/// `ArrayRef` of a single pointer stored in a sum type.
+///
/// There is no support for constructing or accessing with a dynamic tag as
/// that would fundamentally violate the type safety provided by the sum type.
template <typename TagT, typename... MemberTs> class PointerSumType {
- uintptr_t Value = 0;
-
using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+ // We keep both the raw value and the min tag value's pointer in a union. When
+ // the minimum tag value is zero, this allows code below to cleanly expose the
+ // address of the zero-tag pointer instead of just the zero-tag pointer
+ // itself. This is especially useful when building `ArrayRef`s out of a single
+ // pointer. However, we have to carefully access the union due to the active
+ // member potentially changing. When we *store* a new value, we directly
+ // access the union to allow us to store using the obvious types. However,
+ // when we *read* a value, we copy the underlying storage out to avoid relying
+ // on one member or the other being active.
+ union StorageT {
+ // Ensure we get a null default constructed value. We don't use a member
+ // initializer because some compilers seem to not implement those correctly
+ // for a union.
+ StorageT() : Value(0) {}
+
+ uintptr_t Value;
+
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
+ };
+
+ StorageT Storage;
+
public:
constexpr PointerSumType() = default;
+ /// A typed setter to a given tagged member of the sum type.
+ template <TagT N>
+ void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
+ void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+ assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+ "Pointer is insufficiently aligned to store the discriminant!");
+ Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
+ }
+
/// A typed constructor for a specific tagged member of the sum type.
template <TagT N>
static PointerSumType
create(typename HelperT::template Lookup<N>::PointerT Pointer) {
PointerSumType Result;
- void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
- assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
- "Pointer is insufficiently aligned to store the discriminant!");
- Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+ Result.set<N>(Pointer);
return Result;
}
- TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+ /// Clear the value to null with the min tag type.
+ void clear() { set<HelperT::MinTag>(nullptr); }
+
+ TagT getTag() const {
+ return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
+ }
template <TagT N> bool is() const { return N == getTag(); }
template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
- void *P = is<N>() ? getImpl() : nullptr;
+ void *P = is<N>() ? getVoidPtr() : nullptr;
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
}
template <TagT N>
typename HelperT::template Lookup<N>::PointerT cast() const {
assert(is<N>() && "This instance has a different active member.");
- return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+ return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
+ getVoidPtr());
+ }
+
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
+ getAddrOfZeroTagPointer() const {
+ return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
}
- explicit operator bool() const { return Value & HelperT::PointerMask; }
- bool operator==(const PointerSumType &R) const { return Value == R.Value; }
- bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
- bool operator<(const PointerSumType &R) const { return Value < R.Value; }
- bool operator>(const PointerSumType &R) const { return Value > R.Value; }
- bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
- bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
+ getAddrOfZeroTagPointer() {
+ static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
+ assert(is<HelperT::MinTag>() && "The active tag is not zero!");
+ // Store the initial value of the pointer when read out of our storage.
+ auto InitialPtr = get<HelperT::MinTag>();
+ // Now update the active member of the union to be the actual pointer-typed
+ // member so that accessing it indirectly through the returned address is
+ // valid.
+ Storage.MinTagPointer = InitialPtr;
+ // Finally, validate that this was a no-op as expected by reading it back
+ // out using the same underlying-storage read as above.
+ assert(InitialPtr == get<HelperT::MinTag>() &&
+ "Switching to typed storage changed the pointer returned!");
+ // Now we can correctly return an address to typed storage.
+ return &Storage.MinTagPointer;
+ }
+
+ explicit operator bool() const {
+ return getOpaqueValue() & HelperT::PointerMask;
+ }
+ bool operator==(const PointerSumType &R) const {
+ return getOpaqueValue() == R.getOpaqueValue();
+ }
+ bool operator!=(const PointerSumType &R) const {
+ return getOpaqueValue() != R.getOpaqueValue();
+ }
+ bool operator<(const PointerSumType &R) const {
+ return getOpaqueValue() < R.getOpaqueValue();
+ }
+ bool operator>(const PointerSumType &R) const {
+ return getOpaqueValue() > R.getOpaqueValue();
+ }
+ bool operator<=(const PointerSumType &R) const {
+ return getOpaqueValue() <= R.getOpaqueValue();
+ }
+ bool operator>=(const PointerSumType &R) const {
+ return getOpaqueValue() >= R.getOpaqueValue();
+ }
- uintptr_t getOpaqueValue() const { return Value; }
+ uintptr_t getOpaqueValue() const {
+ // Read the underlying storage of the union, regardless of the active
+ // member.
+ return bit_cast<uintptr_t>(Storage);
+ }
protected:
- void *getImpl() const {
- return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+ void *getVoidPtr() const {
+ return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
}
};
@@ -151,8 +238,9 @@ struct PointerSumTypeHelper : MemberTs... {
enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
// Also compute the smallest discriminant and various masks for convenience.
+ constexpr static TagT MinTag =
+ static_cast<TagT>(Min<MemberTs::Tag...>::value);
enum : uint64_t {
- MinTag = Min<MemberTs::Tag...>::value,
PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
TagMask = ~PointerMask
};
diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
index dc8a9b6e78b2..d77b12228cb1 100644
--- a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -296,12 +296,15 @@ class ReversePostOrderTraversal {
public:
using rpo_iterator = typename std::vector<NodeRef>::reverse_iterator;
+ using const_rpo_iterator = typename std::vector<NodeRef>::const_reverse_iterator;
ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
// Because we want a reverse post order, use reverse iterators from the vector
rpo_iterator begin() { return Blocks.rbegin(); }
+ const_rpo_iterator begin() const { return Blocks.crbegin(); }
rpo_iterator end() { return Blocks.rend(); }
+ const_rpo_iterator end() const { return Blocks.crend(); }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/ADT/STLExtras.h b/contrib/llvm/include/llvm/ADT/STLExtras.h
index 94365dd9ced1..f66ca7c08a73 100644
--- a/contrib/llvm/include/llvm/ADT/STLExtras.h
+++ b/contrib/llvm/include/llvm/ADT/STLExtras.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
@@ -70,6 +71,16 @@ template <typename B1, typename... Bn>
struct conjunction<B1, Bn...>
: std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
+template <typename T> struct make_const_ptr {
+ using type =
+ typename std::add_pointer<typename std::add_const<T>::type>::type;
+};
+
+template <typename T> struct make_const_ref {
+ using type = typename std::add_lvalue_reference<
+ typename std::add_const<T>::type>::type;
+};
+
//===----------------------------------------------------------------------===//
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
@@ -194,6 +205,12 @@ void adl_swap(T &&lhs, T &&rhs) noexcept(
adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
}
+/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
+template <typename T>
+constexpr bool empty(const T &RangeOrContainer) {
+ return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
+}
+
// mapped_iterator - This is a simple iterator adapter that causes a function to
// be applied whenever operator* is invoked on the iterator.
@@ -418,9 +435,94 @@ make_filter_range(RangeT &&Range, PredicateT Pred) {
std::end(std::forward<RangeT>(Range)), Pred));
}
-// forward declarations required by zip_shortest/zip_first
+/// A pseudo-iterator adaptor that is designed to implement "early increment"
+/// style loops.
+///
+/// This is *not a normal iterator* and should almost never be used directly. It
+/// is intended primarily to be used with range based for loops and some range
+/// algorithms.
+///
+/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
+/// somewhere between them. The constraints of these iterators are:
+///
+/// - On construction or after being incremented, it is comparable and
+/// dereferencable. It is *not* incrementable.
+/// - After being dereferenced, it is neither comparable nor dereferencable, it
+/// is only incrementable.
+///
+/// This means you can only dereference the iterator once, and you can only
+/// increment it once between dereferences.
+template <typename WrappedIteratorT>
+class early_inc_iterator_impl
+ : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
+ WrappedIteratorT, std::input_iterator_tag> {
+ using BaseT =
+ iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
+ WrappedIteratorT, std::input_iterator_tag>;
+
+ using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
+
+protected:
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ bool IsEarlyIncremented = false;
+#endif
+
+public:
+ early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
+
+ using BaseT::operator*;
+ typename BaseT::reference operator*() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(!IsEarlyIncremented && "Cannot dereference twice!");
+ IsEarlyIncremented = true;
+#endif
+ return *(this->I)++;
+ }
+
+ using BaseT::operator++;
+ early_inc_iterator_impl &operator++() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
+ IsEarlyIncremented = false;
+#endif
+ return *this;
+ }
+
+ using BaseT::operator==;
+ bool operator==(const early_inc_iterator_impl &RHS) const {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(!IsEarlyIncremented && "Cannot compare after dereferencing!");
+#endif
+ return BaseT::operator==(RHS);
+ }
+};
+
+/// Make a range that does early increment to allow mutation of the underlying
+/// range without disrupting iteration.
+///
+/// The underlying iterator will be incremented immediately after it is
+/// dereferenced, allowing deletion of the current node or insertion of nodes to
+/// not disrupt iteration provided they do not invalidate the *next* iterator --
+/// the current iterator can be invalidated.
+///
+/// This requires a very exact pattern of use that is only really suitable to
+/// range based for loops and other range algorithms that explicitly guarantee
+/// to dereference exactly once each element, and to increment exactly once each
+/// element.
+template <typename RangeT>
+iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
+make_early_inc_range(RangeT &&Range) {
+ using EarlyIncIteratorT =
+ early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
+ return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
+ EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+// forward declarations required by zip_shortest/zip_first/zip_longest
template <typename R, typename UnaryPredicate>
bool all_of(R &&range, UnaryPredicate P);
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&range, UnaryPredicate P);
template <size_t... I> struct index_sequence;
@@ -571,6 +673,132 @@ detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}
+namespace detail {
+template <typename Iter>
+static Iter next_or_end(const Iter &I, const Iter &End) {
+ if (I == End)
+ return End;
+ return std::next(I);
+}
+
+template <typename Iter>
+static auto deref_or_none(const Iter &I, const Iter &End)
+ -> llvm::Optional<typename std::remove_const<
+ typename std::remove_reference<decltype(*I)>::type>::type> {
+ if (I == End)
+ return None;
+ return *I;
+}
+
+template <typename Iter> struct ZipLongestItemType {
+ using type =
+ llvm::Optional<typename std::remove_const<typename std::remove_reference<
+ decltype(*std::declval<Iter>())>::type>::type>;
+};
+
+template <typename... Iters> struct ZipLongestTupleType {
+ using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
+};
+
+template <typename... Iters>
+class zip_longest_iterator
+ : public iterator_facade_base<
+ zip_longest_iterator<Iters...>,
+ typename std::common_type<
+ std::forward_iterator_tag,
+ typename std::iterator_traits<Iters>::iterator_category...>::type,
+ typename ZipLongestTupleType<Iters...>::type,
+ typename std::iterator_traits<typename std::tuple_element<
+ 0, std::tuple<Iters...>>::type>::difference_type,
+ typename ZipLongestTupleType<Iters...>::type *,
+ typename ZipLongestTupleType<Iters...>::type> {
+public:
+ using value_type = typename ZipLongestTupleType<Iters...>::type;
+
+private:
+ std::tuple<Iters...> iterators;
+ std::tuple<Iters...> end_iterators;
+
+ template <size_t... Ns>
+ bool test(const zip_longest_iterator<Iters...> &other,
+ index_sequence<Ns...>) const {
+ return llvm::any_of(
+ std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+ std::get<Ns>(other.iterators)...},
+ identity<bool>{});
+ }
+
+ template <size_t... Ns> value_type deref(index_sequence<Ns...>) const {
+ return value_type(
+ deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
+ }
+
+ template <size_t... Ns>
+ decltype(iterators) tup_inc(index_sequence<Ns...>) const {
+ return std::tuple<Iters...>(
+ next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
+ }
+
+public:
+ zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
+ : iterators(std::forward<Iters>(ts.first)...),
+ end_iterators(std::forward<Iters>(ts.second)...) {}
+
+ value_type operator*() { return deref(index_sequence_for<Iters...>{}); }
+
+ value_type operator*() const { return deref(index_sequence_for<Iters...>{}); }
+
+ zip_longest_iterator<Iters...> &operator++() {
+ iterators = tup_inc(index_sequence_for<Iters...>{});
+ return *this;
+ }
+
+ bool operator==(const zip_longest_iterator<Iters...> &other) const {
+ return !test(other, index_sequence_for<Iters...>{});
+ }
+};
+
+template <typename... Args> class zip_longest_range {
+public:
+ using iterator =
+ zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename iterator::value_type;
+ using difference_type = typename iterator::difference_type;
+ using pointer = typename iterator::pointer;
+ using reference = typename iterator::reference;
+
+private:
+ std::tuple<Args...> ts;
+
+ template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
+ return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
+ adl_end(std::get<Ns>(ts)))...);
+ }
+
+ template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
+ return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
+ adl_end(std::get<Ns>(ts)))...);
+ }
+
+public:
+ zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+
+ iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
+ iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
+};
+} // namespace detail
+
+/// Iterate over two or more iterators at the same time. Iteration continues
+/// until all iterators reach the end. The llvm::Optional only contains a value
+/// if the iterator has not reached the end.
+template <typename T, typename U, typename... Args>
+detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zip_longest_range<T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
+
/// Iterator wrapper that concatenates sequences together.
///
/// This can concatenate different iterators, even with different types, into
@@ -593,18 +821,20 @@ class concat_iterator
/// Note that something like iterator_range seems nice at first here, but the
/// range properties are of little benefit and end up getting in the way
/// because we need to do mutation on the current iterators.
- std::tuple<std::pair<IterTs, IterTs>...> IterPairs;
+ std::tuple<IterTs...> Begins;
+ std::tuple<IterTs...> Ends;
/// Attempts to increment a specific iterator.
///
/// Returns true if it was able to increment the iterator. Returns false if
/// the iterator is already at the end iterator.
template <size_t Index> bool incrementHelper() {
- auto &IterPair = std::get<Index>(IterPairs);
- if (IterPair.first == IterPair.second)
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
return false;
- ++IterPair.first;
+ ++Begin;
return true;
}
@@ -628,11 +858,12 @@ class concat_iterator
/// dereferences the iterator and returns the address of the resulting
/// reference.
template <size_t Index> ValueT *getHelper() const {
- auto &IterPair = std::get<Index>(IterPairs);
- if (IterPair.first == IterPair.second)
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
return nullptr;
- return &*IterPair.first;
+ return &*Begin;
}
/// Finds the first non-end iterator, dereferences, and returns the resulting
@@ -659,7 +890,7 @@ public:
/// iterators.
template <typename... RangeTs>
explicit concat_iterator(RangeTs &&... Ranges)
- : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}
+ : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
using BaseT::operator++;
@@ -671,7 +902,7 @@ public:
ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
bool operator==(const concat_iterator &RHS) const {
- return IterPairs == RHS.IterPairs;
+ return Begins == RHS.Begins && Ends == RHS.Ends;
}
};
@@ -740,6 +971,19 @@ struct less_second {
}
};
+/// \brief Function object to apply a binary function to the first component of
+/// a std::pair.
+template<typename FuncTy>
+struct on_first {
+ FuncTy func;
+
+ template <typename T>
+ auto operator()(const T &lhs, const T &rhs) const
+ -> decltype(func(lhs.first, rhs.first)) {
+ return func(lhs.first, rhs.first);
+ }
+};
+
// A subset of N3658. More stuff can be added as-needed.
/// Represents a compile-time sequence of integers.
@@ -877,6 +1121,10 @@ inline void sort(IteratorTy Start, IteratorTy End) {
std::sort(Start, End);
}
+template <typename Container> inline void sort(Container &&C) {
+ llvm::sort(adl_begin(C), adl_end(C));
+}
+
template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
@@ -886,6 +1134,11 @@ inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
std::sort(Start, End, Comp);
}
+template <typename Container, typename Compare>
+inline void sort(Container &&C, Compare Comp) {
+ llvm::sort(adl_begin(C), adl_end(C), Comp);
+}
+
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
@@ -908,6 +1161,18 @@ void DeleteContainerSeconds(Container &C) {
C.clear();
}
+/// Get the size of a range. This is a wrapper function around std::distance
+/// which is only enabled when the operation is O(1).
+template <typename R>
+auto size(R &&Range, typename std::enable_if<
+ std::is_same<typename std::iterator_traits<decltype(
+ Range.begin())>::iterator_category,
+ std::random_access_iterator_tag>::value,
+ void>::type * = nullptr)
+ -> decltype(std::distance(Range.begin(), Range.end())) {
+ return std::distance(Range.begin(), Range.end());
+}
+
/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
@@ -1018,6 +1283,33 @@ auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
return std::lower_bound(adl_begin(Range), adl_end(Range), I);
}
+template <typename R, typename ForwardIt, typename Compare>
+auto lower_bound(R &&Range, ForwardIt I, Compare C)
+ -> decltype(adl_begin(Range)) {
+ return std::lower_bound(adl_begin(Range), adl_end(Range), I, C);
+}
+
+/// Provide wrappers to std::upper_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename ForwardIt>
+auto upper_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range), I);
+}
+
+template <typename R, typename ForwardIt, typename Compare>
+auto upper_bound(R &&Range, ForwardIt I, Compare C)
+ -> decltype(adl_begin(Range)) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range), I, C);
+}
+/// Wrapper function around std::equal to detect if all elements
+/// in a container are same.
+template <typename R>
+bool is_splat(R &&Range) {
+ size_t range_size = size(Range);
+ return range_size != 0 && (range_size == 1 ||
+ std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
+}
+
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
@@ -1039,18 +1331,6 @@ void erase_if(Container &C, UnaryPredicate P) {
C.erase(remove_if(C, P), C.end());
}
-/// Get the size of a range. This is a wrapper function around std::distance
-/// which is only enabled when the operation is O(1).
-template <typename R>
-auto size(R &&Range, typename std::enable_if<
- std::is_same<typename std::iterator_traits<decltype(
- Range.begin())>::iterator_category,
- std::random_access_iterator_tag>::value,
- void>::type * = nullptr)
- -> decltype(std::distance(Range.begin(), Range.end())) {
- return std::distance(Range.begin(), Range.end());
-}
-
//===----------------------------------------------------------------------===//
// Extra additions to <memory>
//===----------------------------------------------------------------------===//
@@ -1263,6 +1543,40 @@ auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl(
Indices{});
}
+/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
+/// time. Not meant for use with random-access iterators.
+template <typename IterTy>
+bool hasNItems(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ typename std::enable_if<
+ !std::is_same<
+ typename std::iterator_traits<typename std::remove_reference<
+ decltype(Begin)>::type>::iterator_category,
+ std::random_access_iterator_tag>::value,
+ void>::type * = nullptr) {
+ for (; N; --N, ++Begin)
+ if (Begin == End)
+ return false; // Too few.
+ return Begin == End;
+}
+
+/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
+/// time. Not meant for use with random-access iterators.
+template <typename IterTy>
+bool hasNItemsOrMore(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ typename std::enable_if<
+ !std::is_same<
+ typename std::iterator_traits<typename std::remove_reference<
+ decltype(Begin)>::type>::iterator_category,
+ std::random_access_iterator_tag>::value,
+ void>::type * = nullptr) {
+ for (; N; --N, ++Begin)
+ if (Begin == End)
+ return false; // Too few.
+ return true;
+}
+
} // end namespace llvm
#endif // LLVM_ADT_STLEXTRAS_H
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index b6391746639b..0a73dbd60671 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -92,10 +92,6 @@ public:
};
private:
- bool isSmall() const {
- return X & uintptr_t(1);
- }
-
BitVector *getPointer() const {
assert(!isSmall());
return reinterpret_cast<BitVector *>(X);
@@ -186,6 +182,8 @@ public:
return make_range(set_bits_begin(), set_bits_end());
}
+ bool isSmall() const { return X & uintptr_t(1); }
+
/// Tests whether there are no bits in this bitvector.
bool empty() const {
return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
@@ -242,7 +240,7 @@ public:
uintptr_t Bits = getSmallBits();
if (Bits == 0)
return -1;
- return NumBaseBits - countLeadingZeros(Bits);
+ return NumBaseBits - countLeadingZeros(Bits) - 1;
}
return getPointer()->find_last();
}
@@ -265,7 +263,9 @@ public:
return -1;
uintptr_t Bits = getSmallBits();
- return NumBaseBits - countLeadingOnes(Bits);
+ // Set unused bits.
+ Bits |= ~uintptr_t(0) << getSmallSize();
+ return NumBaseBits - countLeadingOnes(Bits) - 1;
}
return getPointer()->find_last_unset();
}
@@ -465,6 +465,11 @@ public:
return (*this)[Idx];
}
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ resize(size() + 1, Val);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
@@ -482,10 +487,17 @@ public:
bool operator==(const SmallBitVector &RHS) const {
if (size() != RHS.size())
return false;
- if (isSmall())
+ if (isSmall() && RHS.isSmall())
return getSmallBits() == RHS.getSmallBits();
- else
+ else if (!isSmall() && !RHS.isSmall())
return *getPointer() == *RHS.getPointer();
+ else {
+ for (size_t i = 0, e = size(); i != e; ++i) {
+ if ((*this)[i] != RHS[i])
+ return false;
+ }
+ return true;
+ }
}
bool operator!=(const SmallBitVector &RHS) const {
@@ -493,16 +505,19 @@ public:
}
// Intersection, union, disjoint union.
+ // FIXME BitVector::operator&= does not resize the LHS but this does
SmallBitVector &operator&=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
- if (isSmall())
+ if (isSmall() && RHS.isSmall())
setSmallBits(getSmallBits() & RHS.getSmallBits());
- else if (!RHS.isSmall())
+ else if (!isSmall() && !RHS.isSmall())
getPointer()->operator&=(*RHS.getPointer());
else {
- SmallBitVector Copy = RHS;
- Copy.resize(size());
- getPointer()->operator&=(*Copy.getPointer());
+ size_t i, e;
+ for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ (*this)[i] = test(i) && RHS.test(i);
+ for (e = size(); i != e; ++i)
+ reset(i);
}
return *this;
}
@@ -542,28 +557,26 @@ public:
SmallBitVector &operator|=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
- if (isSmall())
+ if (isSmall() && RHS.isSmall())
setSmallBits(getSmallBits() | RHS.getSmallBits());
- else if (!RHS.isSmall())
+ else if (!isSmall() && !RHS.isSmall())
getPointer()->operator|=(*RHS.getPointer());
else {
- SmallBitVector Copy = RHS;
- Copy.resize(size());
- getPointer()->operator|=(*Copy.getPointer());
+ for (size_t i = 0, e = RHS.size(); i != e; ++i)
+ (*this)[i] = test(i) || RHS.test(i);
}
return *this;
}
SmallBitVector &operator^=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
- if (isSmall())
+ if (isSmall() && RHS.isSmall())
setSmallBits(getSmallBits() ^ RHS.getSmallBits());
- else if (!RHS.isSmall())
+ else if (!isSmall() && !RHS.isSmall())
getPointer()->operator^=(*RHS.getPointer());
else {
- SmallBitVector Copy = RHS;
- Copy.resize(size());
- getPointer()->operator^=(*Copy.getPointer());
+ for (size_t i = 0, e = RHS.size(); i != e; ++i)
+ (*this)[i] = test(i) != RHS.test(i);
}
return *this;
}
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index acb4426b4f45..0636abbb1fbf 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -182,7 +182,7 @@ public:
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
-template <typename T, bool isPodLike>
+template <typename T, bool = isPodLike<T>::value>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
@@ -299,7 +299,7 @@ protected:
// use memcpy here. Note that I and E are iterators and thus might be
// invalid for memcpy if they are equal.
if (I != E)
- memcpy(Dest, I, (E - I) * sizeof(T));
+ memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
}
/// Double the size of the allocated memory, guaranteeing space for at
@@ -310,7 +310,7 @@ public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
- memcpy(this->end(), &Elt, sizeof(T));
+ memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
this->set_size(this->size() + 1);
}
@@ -320,8 +320,8 @@ public:
/// This class consists of common code factored out of the SmallVector class to
/// reduce code duplication based on the SmallVector 'N' template parameter.
template <typename T>
-class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
- using SuperClass = SmallVectorTemplateBase<T, isPodLike<T>::value>;
+class SmallVectorImpl : public SmallVectorTemplateBase<T> {
+ using SuperClass = SmallVectorTemplateBase<T>;
public:
using iterator = typename SuperClass::iterator;
diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
index 4cbf40c76805..84e73bcbace8 100644
--- a/contrib/llvm/include/llvm/ADT/SparseBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
@@ -261,21 +261,33 @@ class SparseBitVector {
BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
};
- // Pointer to our current Element.
- ElementListIter CurrElementIter;
ElementList Elements;
+ // Pointer to our current Element. This has no visible effect on the external
+ // state of a SparseBitVector, it's just used to improve performance in the
+ // common case of testing/modifying bits with similar indices.
+ mutable ElementListIter CurrElementIter;
// This is like std::lower_bound, except we do linear searching from the
// current position.
- ElementListIter FindLowerBound(unsigned ElementIndex) {
+ ElementListIter FindLowerBoundImpl(unsigned ElementIndex) const {
+
+ // We cache a non-const iterator so we're forced to resort to const_cast to
+ // get the begin/end in the case where 'this' is const. To avoid duplication
+ // of code with the only difference being whether the const cast is present
+ // 'this' is always const in this particular function and we sort out the
+ // difference in FindLowerBound and FindLowerBoundConst.
+ ElementListIter Begin =
+ const_cast<SparseBitVector<ElementSize> *>(this)->Elements.begin();
+ ElementListIter End =
+ const_cast<SparseBitVector<ElementSize> *>(this)->Elements.end();
if (Elements.empty()) {
- CurrElementIter = Elements.begin();
- return Elements.begin();
+ CurrElementIter = Begin;
+ return CurrElementIter;
}
// Make sure our current iterator is valid.
- if (CurrElementIter == Elements.end())
+ if (CurrElementIter == End)
--CurrElementIter;
// Search from our current iterator, either backwards or forwards,
@@ -284,17 +296,23 @@ class SparseBitVector {
if (CurrElementIter->index() == ElementIndex) {
return ElementIter;
} else if (CurrElementIter->index() > ElementIndex) {
- while (ElementIter != Elements.begin()
+ while (ElementIter != Begin
&& ElementIter->index() > ElementIndex)
--ElementIter;
} else {
- while (ElementIter != Elements.end() &&
+ while (ElementIter != End &&
ElementIter->index() < ElementIndex)
++ElementIter;
}
CurrElementIter = ElementIter;
return ElementIter;
}
+ ElementListConstIter FindLowerBoundConst(unsigned ElementIndex) const {
+ return FindLowerBoundImpl(ElementIndex);
+ }
+ ElementListIter FindLowerBound(unsigned ElementIndex) {
+ return FindLowerBoundImpl(ElementIndex);
+ }
// Iterator to walk set bits in the bitmap. This iterator is a lot uglier
// than it would be, in order to be efficient.
@@ -423,22 +441,12 @@ class SparseBitVector {
public:
using iterator = SparseBitVectorIterator;
- SparseBitVector() {
- CurrElementIter = Elements.begin();
- }
+ SparseBitVector() : Elements(), CurrElementIter(Elements.begin()) {}
- // SparseBitVector copy ctor.
- SparseBitVector(const SparseBitVector &RHS) {
- ElementListConstIter ElementIter = RHS.Elements.begin();
- while (ElementIter != RHS.Elements.end()) {
- Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
- ++ElementIter;
- }
-
- CurrElementIter = Elements.begin ();
- }
-
- ~SparseBitVector() = default;
+ SparseBitVector(const SparseBitVector &RHS)
+ : Elements(RHS.Elements), CurrElementIter(Elements.begin()) {}
+ SparseBitVector(SparseBitVector &&RHS)
+ : Elements(std::move(RHS.Elements)), CurrElementIter(Elements.begin()) {}
// Clear.
void clear() {
@@ -450,26 +458,23 @@ public:
if (this == &RHS)
return *this;
- Elements.clear();
-
- ElementListConstIter ElementIter = RHS.Elements.begin();
- while (ElementIter != RHS.Elements.end()) {
- Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
- ++ElementIter;
- }
-
- CurrElementIter = Elements.begin ();
-
+ Elements = RHS.Elements;
+ CurrElementIter = Elements.begin();
+ return *this;
+ }
+ SparseBitVector &operator=(SparseBitVector &&RHS) {
+ Elements = std::move(RHS.Elements);
+ CurrElementIter = Elements.begin();
return *this;
}
// Test, Reset, and Set a bit in the bitmap.
- bool test(unsigned Idx) {
+ bool test(unsigned Idx) const {
if (Elements.empty())
return false;
unsigned ElementIndex = Idx / ElementSize;
- ElementListIter ElementIter = FindLowerBound(ElementIndex);
+ ElementListConstIter ElementIter = FindLowerBoundConst(ElementIndex);
// If we can't find an element that is supposed to contain this bit, there
// is nothing more to do.
diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h
index 71b0e7527cb7..60a03633a8a6 100644
--- a/contrib/llvm/include/llvm/ADT/StringExtras.h
+++ b/contrib/llvm/include/llvm/ADT/StringExtras.h
@@ -139,22 +139,23 @@ inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
-inline std::string toHex(StringRef Input) {
+inline std::string toHex(StringRef Input, bool LowerCase = false) {
static const char *const LUT = "0123456789ABCDEF";
+ const uint8_t Offset = LowerCase ? 32 : 0;
size_t Length = Input.size();
std::string Output;
Output.reserve(2 * Length);
for (size_t i = 0; i < Length; ++i) {
const unsigned char c = Input[i];
- Output.push_back(LUT[c >> 4]);
- Output.push_back(LUT[c & 15]);
+ Output.push_back(LUT[c >> 4] | Offset);
+ Output.push_back(LUT[c & 15] | Offset);
}
return Output;
}
-inline std::string toHex(ArrayRef<uint8_t> Input) {
- return toHex(toStringRef(Input));
+inline std::string toHex(ArrayRef<uint8_t> Input, bool LowerCase = false) {
+ return toHex(toStringRef(Input), LowerCase);
}
inline uint8_t hexFromNibbles(char MSB, char LSB) {
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index c95b16dd4e8c..e06a68e27317 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -55,12 +55,11 @@ public:
bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
hexagon, // Hexagon: hexagon
- mips, // MIPS: mips, mipsallegrex
- mipsel, // MIPSEL: mipsel, mipsallegrexel
- mips64, // MIPS64: mips64
- mips64el, // MIPS64EL: mips64el
+ mips, // MIPS: mips, mipsallegrex, mipsr6
+ mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el
+ mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
+ mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
msp430, // MSP430: msp430
- nios2, // NIOSII: nios2
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
ppc64le, // PPC64LE: powerpc64le
@@ -101,6 +100,7 @@ public:
enum SubArchType {
NoSubArch,
+ ARMSubArch_v8_5a,
ARMSubArch_v8_4a,
ARMSubArch_v8_3a,
ARMSubArch_v8_2a,
@@ -125,7 +125,9 @@ public:
KalimbaSubArch_v3,
KalimbaSubArch_v4,
- KalimbaSubArch_v5
+ KalimbaSubArch_v5,
+
+ MipsSubArch_r6
};
enum VendorType {
UnknownVendor,
@@ -182,7 +184,10 @@ public:
Mesa3D,
Contiki,
AMDPAL, // AMD PAL Runtime
- LastOSType = AMDPAL
+ HermitCore, // HermitCore Unikernel/Multikernel
+ Hurd, // GNU/Hurd
+ WASI, // Experimental WebAssembly OS
+ LastOSType = WASI
};
enum EnvironmentType {
UnknownEnvironment,
@@ -578,9 +583,20 @@ public:
return getOS() == Triple::KFreeBSD;
}
+ /// Tests whether the OS is Hurd.
+ bool isOSHurd() const {
+ return getOS() == Triple::Hurd;
+ }
+
+ /// Tests whether the OS is WASI.
+ bool isOSWASI() const {
+ return getOS() == Triple::WASI;
+ }
+
/// Tests whether the OS uses glibc.
bool isOSGlibc() const {
- return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD) &&
+ return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD ||
+ getOS() == Triple::Hurd) &&
!isAndroid();
}
diff --git a/contrib/llvm/include/llvm/ADT/bit.h b/contrib/llvm/include/llvm/ADT/bit.h
new file mode 100644
index 000000000000..a4aba7b6a9ee
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/bit.h
@@ -0,0 +1,59 @@
+//===-- llvm/ADT/bit.h - C++20 <bit> ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++20 <bit> header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BIT_H
+#define LLVM_ADT_BIT_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+
+// This implementation of bit_cast is different from the C++17 one in two ways:
+// - It isn't constexpr because that requires compiler support.
+// - It requires trivially-constructible To, to avoid UB in the implementation.
+template <typename To, typename From
+ , typename = typename std::enable_if<sizeof(To) == sizeof(From)>::type
+#if (__has_feature(is_trivially_constructible) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ , typename = typename std::is_trivially_constructible<To>::type
+#elif __has_feature(is_trivially_constructible)
+ , typename = typename std::enable_if<__is_trivially_constructible(To)>::type
+#else
+ // See comment below.
+#endif
+#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ , typename = typename std::enable_if<std::is_trivially_copyable<To>::value>::type
+ , typename = typename std::enable_if<std::is_trivially_copyable<From>::value>::type
+#elif __has_feature(is_trivially_copyable)
+ , typename = typename std::enable_if<__is_trivially_copyable(To)>::type
+ , typename = typename std::enable_if<__is_trivially_copyable(From)>::type
+#else
+ // This case is GCC 4.x. clang with libc++ or libstdc++ never get here. Unlike
+ // llvm/Support/type_traits.h's isPodLike we don't want to provide a
+ // good-enough answer here: developers in that configuration will hit
+ // compilation failures on the bots instead of locally. That's acceptable
+ // because it's very few developers, and only until we move past C++11.
+#endif
+>
+inline To bit_cast(const From &from) noexcept {
+ To to;
+ std::memcpy(&to, &from, sizeof(To));
+ return to;
+}
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/iterator.h b/contrib/llvm/include/llvm/ADT/iterator.h
index 549c5221173d..40e490cf7864 100644
--- a/contrib/llvm/include/llvm/ADT/iterator.h
+++ b/contrib/llvm/include/llvm/ADT/iterator.h
@@ -202,9 +202,7 @@ template <
typename ReferenceT = typename std::conditional<
std::is_same<T, typename std::iterator_traits<
WrappedIteratorT>::value_type>::value,
- typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type,
- // Don't provide these, they are mostly to act as aliases below.
- typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
+ typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type>
class iterator_adaptor_base
: public iterator_facade_base<DerivedT, IteratorCategoryT, T,
DifferenceTypeT, PointerT, ReferenceT> {
@@ -311,8 +309,10 @@ make_pointee_range(RangeT &&Range) {
template <typename WrappedIteratorT,
typename T = decltype(&*std::declval<WrappedIteratorT>())>
class pointer_iterator
- : public iterator_adaptor_base<pointer_iterator<WrappedIteratorT, T>,
- WrappedIteratorT, T> {
+ : public iterator_adaptor_base<
+ pointer_iterator<WrappedIteratorT, T>, WrappedIteratorT,
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ T> {
mutable T Ptr;
public:
@@ -334,6 +334,34 @@ make_pointer_range(RangeT &&Range) {
PointerIteratorT(std::end(std::forward<RangeT>(Range))));
}
+// Wrapper iterator over iterator ItType, adding DataRef to the type of ItType,
+// to create NodeRef = std::pair<InnerTypeOfItType, DataRef>.
+template <typename ItType, typename NodeRef, typename DataRef>
+class WrappedPairNodeDataIterator
+ : public iterator_adaptor_base<
+ WrappedPairNodeDataIterator<ItType, NodeRef, DataRef>, ItType,
+ typename std::iterator_traits<ItType>::iterator_category, NodeRef,
+ std::ptrdiff_t, NodeRef *, NodeRef &> {
+ using BaseT = iterator_adaptor_base<
+ WrappedPairNodeDataIterator, ItType,
+ typename std::iterator_traits<ItType>::iterator_category, NodeRef,
+ std::ptrdiff_t, NodeRef *, NodeRef &>;
+
+ const DataRef DR;
+ mutable NodeRef NR;
+
+public:
+ WrappedPairNodeDataIterator(ItType Begin, const DataRef DR)
+ : BaseT(Begin), DR(DR) {
+ NR.first = DR;
+ }
+
+ NodeRef &operator*() const {
+ NR.second = *this->I;
+ return NR;
+ }
+};
+
} // end namespace llvm
#endif // LLVM_ADT_ITERATOR_H
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index be3496bbd955..e2a2ac0622e8 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -43,7 +43,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
@@ -335,8 +334,7 @@ public:
/// A convenience wrapper around the primary \c alias interface.
AliasResult alias(const Value *V1, const Value *V2) {
- return alias(V1, MemoryLocation::UnknownSize, V2,
- MemoryLocation::UnknownSize);
+ return alias(V1, LocationSize::unknown(), V2, LocationSize::unknown());
}
/// A trivial helper function to check to see if the specified pointers are
@@ -364,7 +362,8 @@ public:
/// A convenience wrapper around the \c isMustAlias helper interface.
bool isMustAlias(const Value *V1, const Value *V2) {
- return alias(V1, 1, V2, 1) == MustAlias;
+ return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) ==
+ MustAlias;
}
/// Checks whether the given location points to constant memory, or if
@@ -382,15 +381,15 @@ public:
/// \name Simple mod/ref information
/// @{
- /// Get the ModRef info associated with a pointer argument of a callsite. The
+ /// Get the ModRef info associated with a pointer argument of a call. The
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information. This never sets ModRefInfo::Must.
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
/// Return the behavior of the given call site.
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
/// Return the behavior when calling the given function.
FunctionModRefBehavior getModRefBehavior(const Function *F);
@@ -406,8 +405,8 @@ public:
/// property (e.g. calls to 'sin' and 'cos').
///
/// This property corresponds to the GCC 'const' attribute.
- bool doesNotAccessMemory(ImmutableCallSite CS) {
- return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
+ bool doesNotAccessMemory(const CallBase *Call) {
+ return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory;
}
/// Checks if the specified function is known to never read or write memory.
@@ -434,8 +433,8 @@ public:
/// absence of interfering store instructions, such as CSE of strlen calls.
///
/// This property corresponds to the GCC 'pure' attribute.
- bool onlyReadsMemory(ImmutableCallSite CS) {
- return onlyReadsMemory(getModRefBehavior(CS));
+ bool onlyReadsMemory(const CallBase *Call) {
+ return onlyReadsMemory(getModRefBehavior(Call));
}
/// Checks if the specified function is known to only read from non-volatile
@@ -500,36 +499,12 @@ public:
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
/// getModRefInfo (for call sites) - A convenience wrapper.
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
+ ModRefInfo getModRefInfo(const CallBase *Call, const Value *P,
LocationSize Size) {
- return getModRefInfo(CS, MemoryLocation(P, Size));
- }
-
- /// getModRefInfo (for calls) - Return information about whether
- /// a particular call modifies or reads the specified memory location.
- ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
- return getModRefInfo(ImmutableCallSite(C), Loc);
- }
-
- /// getModRefInfo (for calls) - A convenience wrapper.
- ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
- LocationSize Size) {
- return getModRefInfo(C, MemoryLocation(P, Size));
- }
-
- /// getModRefInfo (for invokes) - Return information about whether
- /// a particular invoke modifies or reads the specified memory location.
- ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
- return getModRefInfo(ImmutableCallSite(I), Loc);
- }
-
- /// getModRefInfo (for invokes) - A convenience wrapper.
- ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
- LocationSize Size) {
- return getModRefInfo(I, MemoryLocation(P, Size));
+ return getModRefInfo(Call, MemoryLocation(P, Size));
}
/// getModRefInfo (for loads) - Return information about whether
@@ -569,7 +544,7 @@ public:
/// getModRefInfo (for cmpxchges) - A convenience wrapper.
ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
- unsigned Size) {
+ LocationSize Size) {
return getModRefInfo(CX, MemoryLocation(P, Size));
}
@@ -579,7 +554,7 @@ public:
/// getModRefInfo (for atomicrmws) - A convenience wrapper.
ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
- unsigned Size) {
+ LocationSize Size) {
return getModRefInfo(RMW, MemoryLocation(P, Size));
}
@@ -626,8 +601,8 @@ public:
ModRefInfo getModRefInfo(const Instruction *I,
const Optional<MemoryLocation> &OptLoc) {
if (OptLoc == None) {
- if (auto CS = ImmutableCallSite(I)) {
- return createModRefInfo(getModRefBehavior(CS));
+ if (const auto *Call = dyn_cast<CallBase>(I)) {
+ return createModRefInfo(getModRefBehavior(Call));
}
}
@@ -661,12 +636,12 @@ public:
/// Return information about whether a call and an instruction may refer to
/// the same memory locations.
- ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
+ ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call);
/// Return information about whether two call sites may refer to the same set
/// of memory locations. See the AA documentation for details:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
@@ -777,25 +752,25 @@ public:
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information.
- virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
+ virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
unsigned ArgIdx) = 0;
/// Return the behavior of the given call site.
- virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
+ virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0;
/// Return the behavior when calling the given function.
virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
- virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
+ virtual ModRefInfo getModRefInfo(const CallBase *Call,
const MemoryLocation &Loc) = 0;
/// Return information about whether two call sites may refer to the same set
/// of memory locations. See the AA documentation for details:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
- virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) = 0;
+ virtual ModRefInfo getModRefInfo(const CallBase *Call1,
+ const CallBase *Call2) = 0;
/// @}
};
@@ -827,26 +802,26 @@ public:
return Result.pointsToConstantMemory(Loc, OrLocal);
}
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
- return Result.getArgModRefInfo(CS, ArgIdx);
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
+ return Result.getArgModRefInfo(Call, ArgIdx);
}
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
- return Result.getModRefBehavior(CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override {
+ return Result.getModRefBehavior(Call);
}
FunctionModRefBehavior getModRefBehavior(const Function *F) override {
return Result.getModRefBehavior(F);
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS,
+ ModRefInfo getModRefInfo(const CallBase *Call,
const MemoryLocation &Loc) override {
- return Result.getModRefInfo(CS, Loc);
+ return Result.getModRefInfo(Call, Loc);
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) override {
- return Result.getModRefInfo(CS1, CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call1,
+ const CallBase *Call2) override {
+ return Result.getModRefInfo(Call1, Call2);
}
};
@@ -901,25 +876,28 @@ protected:
: CurrentResult.pointsToConstantMemory(Loc, OrLocal);
}
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
- return AAR ? AAR->getArgModRefInfo(CS, ArgIdx) : CurrentResult.getArgModRefInfo(CS, ArgIdx);
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
+ return AAR ? AAR->getArgModRefInfo(Call, ArgIdx)
+ : CurrentResult.getArgModRefInfo(Call, ArgIdx);
}
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
- return AAR ? AAR->getModRefBehavior(CS) : CurrentResult.getModRefBehavior(CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
+ return AAR ? AAR->getModRefBehavior(Call)
+ : CurrentResult.getModRefBehavior(Call);
}
FunctionModRefBehavior getModRefBehavior(const Function *F) {
return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
- return AAR ? AAR->getModRefInfo(CS, Loc)
- : CurrentResult.getModRefInfo(CS, Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
+ return AAR ? AAR->getModRefInfo(Call, Loc)
+ : CurrentResult.getModRefInfo(Call, Loc);
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
- return AAR ? AAR->getModRefInfo(CS1, CS2) : CurrentResult.getModRefInfo(CS1, CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
+ return AAR ? AAR->getModRefInfo(Call1, Call2)
+ : CurrentResult.getModRefInfo(Call1, Call2);
}
};
@@ -951,11 +929,11 @@ public:
return false;
}
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
return ModRefInfo::ModRef;
}
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
return FMRB_UnknownModRefBehavior;
}
@@ -963,11 +941,11 @@ public:
return FMRB_UnknownModRefBehavior;
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
return ModRefInfo::ModRef;
}
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
return ModRefInfo::ModRef;
}
};
@@ -1075,6 +1053,29 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
+/// A wrapper pass for external alias analyses. This just squirrels away the
+/// callback used to run any analyses and register their results.
+struct ExternalAAWrapperPass : ImmutablePass {
+ using CallbackT = std::function<void(Pass &, Function &, AAResults &)>;
+
+ CallbackT CB;
+
+ static char ID;
+
+ ExternalAAWrapperPass() : ImmutablePass(ID) {
+ initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ explicit ExternalAAWrapperPass(CallbackT CB)
+ : ImmutablePass(ID), CB(std::move(CB)) {
+ initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
FunctionPass *createAAResultsWrapperPass();
/// A wrapper pass around a callback which can be used to populate the
diff --git a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
index c9680ff40d1e..7ed5cd5c4734 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -52,9 +52,13 @@ class AliasSet : public ilist_node<AliasSet> {
PointerRec **PrevInList = nullptr;
PointerRec *NextInList = nullptr;
AliasSet *AS = nullptr;
- LocationSize Size = 0;
+ LocationSize Size = LocationSize::mapEmpty();
AAMDNodes AAInfo;
+ // Whether the size for this record has been set at all. This makes no
+ // guarantees about the size being known.
+ bool isSizeSet() const { return Size != LocationSize::mapEmpty(); }
+
public:
PointerRec(Value *V)
: Val(V), AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
@@ -71,9 +75,10 @@ class AliasSet : public ilist_node<AliasSet> {
bool updateSizeAndAAInfo(LocationSize NewSize, const AAMDNodes &NewAAInfo) {
bool SizeChanged = false;
- if (NewSize > Size) {
- Size = NewSize;
- SizeChanged = true;
+ if (NewSize != Size) {
+ LocationSize OldSize = Size;
+ Size = isSizeSet() ? Size.unionWith(NewSize) : NewSize;
+ SizeChanged = OldSize != Size;
}
if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
@@ -91,7 +96,10 @@ class AliasSet : public ilist_node<AliasSet> {
return SizeChanged;
}
- LocationSize getSize() const { return Size; }
+ LocationSize getSize() const {
+ assert(isSizeSet() && "Getting an unset size!");
+ return Size;
+ }
/// Return the AAInfo, or null if there is no information or conflicting
/// information.
@@ -175,9 +183,6 @@ class AliasSet : public ilist_node<AliasSet> {
};
unsigned Alias : 1;
- /// True if this alias set contains volatile loads or stores.
- unsigned Volatile : 1;
-
unsigned SetSize = 0;
void addRef() { ++RefCount; }
@@ -203,9 +208,6 @@ public:
bool isMustAlias() const { return Alias == SetMustAlias; }
bool isMayAlias() const { return Alias == SetMayAlias; }
- /// Return true if this alias set contains volatile loads or stores.
- bool isVolatile() const { return Volatile; }
-
/// Return true if this alias set should be ignored as part of the
/// AliasSetTracker object.
bool isForwardingAliasSet() const { return Forward; }
@@ -224,6 +226,10 @@ public:
// track of the list's exact size.
unsigned size() { return SetSize; }
+ /// If this alias set is known to contain a single instruction and *only* a
+ /// single unique instruction, return it. Otherwise, return nullptr.
+ Instruction* getUniqueInstruction();
+
void print(raw_ostream &OS) const;
void dump() const;
@@ -264,7 +270,7 @@ private:
// Can only be created by AliasSetTracker.
AliasSet()
: PtrListEnd(&PtrList), RefCount(0), AliasAny(false), Access(NoAccess),
- Alias(SetMustAlias), Volatile(false) {}
+ Alias(SetMustAlias) {}
PointerRec *getSomePointer() const {
return PtrList;
@@ -303,8 +309,6 @@ private:
dropRef(AST);
}
- void setVolatile() { Volatile = true; }
-
public:
/// Return true if the specified pointer "may" (or must) alias one of the
/// members in the set.
@@ -379,23 +383,11 @@ public:
/// Return the alias sets that are active.
const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
- /// Return the alias set that the specified pointer lives in. If the New
- /// argument is non-null, this method sets the value to true if a new alias
- /// set is created to contain the pointer (because the pointer didn't alias
- /// anything).
- AliasSet &getAliasSetForPointer(Value *P, LocationSize Size,
- const AAMDNodes &AAInfo);
-
- /// Return the alias set containing the location specified if one exists,
- /// otherwise return null.
- AliasSet *getAliasSetForPointerIfExists(const Value *P, LocationSize Size,
- const AAMDNodes &AAInfo) {
- return mergeAliasSetsForPointer(P, Size, AAInfo);
- }
-
- /// Return true if the specified instruction "may" (or must) alias one of the
- /// members in any of the sets.
- bool containsUnknown(const Instruction *I) const;
+ /// Return the alias set which contains the specified memory location. If
+ /// the memory location aliases two or more existing alias sets, will have
+ /// the effect of merging those alias sets before the single resulting alias
+ /// set is returned.
+ AliasSet &getAliasSetFor(const MemoryLocation &MemLoc);
/// Return the underlying alias analysis object used by this tracker.
AliasAnalysis &getAliasAnalysis() const { return AA; }
@@ -445,8 +437,7 @@ private:
return *Entry;
}
- AliasSet &addPointer(Value *P, LocationSize Size, const AAMDNodes &AAInfo,
- AliasSet::AccessLattice E);
+ AliasSet &addPointer(MemoryLocation Loc, AliasSet::AccessLattice E);
AliasSet *mergeAliasSetsForPointer(const Value *Ptr, LocationSize Size,
const AAMDNodes &AAInfo);
diff --git a/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
index 6344e84b58eb..820d7ac0935a 100644
--- a/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -21,7 +21,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <algorithm>
@@ -84,18 +84,18 @@ public:
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
/// Chases pointers until we find a (constant global) or not.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
/// Get the location associated with a pointer argument of a callsite.
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+ ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
/// Returns the behavior when calling the given call site.
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
@@ -115,7 +115,7 @@ private:
unsigned ZExtBits;
unsigned SExtBits;
- int64_t Scale;
+ APInt Scale;
bool operator==(const VariableGEPIndex &Other) const {
return V == Other.V && ZExtBits == Other.ZExtBits &&
@@ -133,10 +133,10 @@ private:
// Base pointer of the GEP
const Value *Base;
// Total constant offset w.r.t the base from indexing into structs
- int64_t StructOffset;
+ APInt StructOffset;
// Total constant offset w.r.t the base from indexing through
// pointers/arrays/vectors
- int64_t OtherOffset;
+ APInt OtherOffset;
// Scaled variable (non-constant) indices.
SmallVector<VariableGEPIndex, 4> VarIndices;
};
@@ -189,7 +189,7 @@ private:
bool
constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
LocationSize V1Size, LocationSize V2Size,
- int64_t BaseOffset, AssumptionCache *AC,
+ APInt BaseOffset, AssumptionCache *AC,
DominatorTree *DT);
bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
index ca12db6208b8..0b2618735697 100644
--- a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -56,7 +56,7 @@ public:
const Function *getFunction() const;
const BranchProbabilityInfo *getBPI() const;
- void view() const;
+ void view(StringRef = "BlockFrequencyDAGs") const;
/// getblockFreq - Return block frequency. Return 0 if we don't have the
/// information. Please note that initial frequency is equal to ENTRY_FREQ. It
diff --git a/contrib/llvm/include/llvm/Analysis/CFG.h b/contrib/llvm/include/llvm/Analysis/CFG.h
index cccdd1637411..caae0b6e2a8f 100644
--- a/contrib/llvm/include/llvm/Analysis/CFG.h
+++ b/contrib/llvm/include/llvm/Analysis/CFG.h
@@ -25,7 +25,6 @@ class DominatorTree;
class Function;
class Instruction;
class LoopInfo;
-class TerminatorInst;
/// Analyze the specified function to find all of the loop backedges in the
/// function and return them. This is a relatively cheap (compared to
@@ -46,7 +45,7 @@ unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
/// edges from a block with multiple successors to a block with multiple
/// predecessors.
///
-bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
+bool isCriticalEdge(const Instruction *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
/// Determine whether instruction 'To' is reachable from 'From',
diff --git a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
index 5786769cc500..5996dd90bcfd 100644
--- a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -150,7 +150,7 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
/// Display the raw branch weights from PGO.
std::string getEdgeAttributes(const BasicBlock *Node, succ_const_iterator I,
const Function *F) {
- const TerminatorInst *TI = Node->getTerminator();
+ const Instruction *TI = Node->getTerminator();
if (TI->getNumSuccessors() == 1)
return "";
@@ -172,8 +172,7 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
// Prepend a 'W' to indicate that this is a weight rather than the actual
// profile count (due to scaling).
- Twine Attrs = "label=\"W:" + Twine(Weight->getZExtValue()) + "\"";
- return Attrs.str();
+ return ("label=\"W:" + Twine(Weight->getZExtValue()) + "\"").str();
}
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h b/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h
index 5e83ea2a6e2b..61b99f6c3e6b 100644
--- a/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h
+++ b/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h
@@ -364,6 +364,10 @@ public:
InvalidSCCSet, nullptr, nullptr,
InlinedInternalEdges};
+ // Request PassInstrumentation from analysis manager, will use it to run
+ // instrumenting callbacks for the passes later.
+ PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);
+
PreservedAnalyses PA = PreservedAnalyses::all();
CG.buildRefSCCs();
for (auto RCI = CG.postorder_ref_scc_begin(),
@@ -428,8 +432,20 @@ public:
UR.UpdatedRC = nullptr;
UR.UpdatedC = nullptr;
+
+ // Check the PassInstrumentation's BeforePass callbacks before
+ // running the pass, skip its execution completely if asked to
+ // (callback returns false).
+ if (!PI.runBeforePass<LazyCallGraph::SCC>(Pass, *C))
+ continue;
+
PreservedAnalyses PassPA = Pass.run(*C, CGAM, CG, UR);
+ if (UR.InvalidatedSCCs.count(C))
+ PI.runAfterPassInvalidated<LazyCallGraph::SCC>(Pass);
+ else
+ PI.runAfterPass<LazyCallGraph::SCC>(Pass, *C);
+
// Update the SCC and RefSCC if necessary.
C = UR.UpdatedC ? UR.UpdatedC : C;
RC = UR.UpdatedRC ? UR.UpdatedRC : RC;
@@ -615,12 +631,20 @@ public:
if (CG.lookupSCC(*N) != CurrentC)
continue;
- PreservedAnalyses PassPA = Pass.run(N->getFunction(), FAM);
+ Function &F = N->getFunction();
+
+ PassInstrumentation PI = FAM.getResult<PassInstrumentationAnalysis>(F);
+ if (!PI.runBeforePass<Function>(Pass, F))
+ continue;
+
+ PreservedAnalyses PassPA = Pass.run(F, FAM);
+
+ PI.runAfterPass<Function>(Pass, F);
// We know that the function pass couldn't have invalidated any other
// function's analyses (that's the contract of a function pass), so
// directly handle the function analysis manager's invalidation here.
- FAM.invalidate(N->getFunction(), PassPA);
+ FAM.invalidate(F, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
@@ -690,6 +714,8 @@ public:
PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR) {
PreservedAnalyses PA = PreservedAnalyses::all();
+ PassInstrumentation PI =
+ AM.getResult<PassInstrumentationAnalysis>(InitialC, CG);
// The SCC may be refined while we are running passes over it, so set up
// a pointer that we can update.
@@ -733,8 +759,17 @@ public:
auto CallCounts = ScanSCC(*C, CallHandles);
for (int Iteration = 0;; ++Iteration) {
+
+ if (!PI.runBeforePass<LazyCallGraph::SCC>(Pass, *C))
+ continue;
+
PreservedAnalyses PassPA = Pass.run(*C, AM, CG, UR);
+ if (UR.InvalidatedSCCs.count(C))
+ PI.runAfterPassInvalidated<LazyCallGraph::SCC>(Pass);
+ else
+ PI.runAfterPass<LazyCallGraph::SCC>(Pass, *C);
+
// If the SCC structure has changed, bail immediately and let the outer
// CGSCC layer handle any iteration to reflect the refined structure.
if (UR.UpdatedC && UR.UpdatedC != C) {
diff --git a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
index 7a869a51233a..aaaaff9ae252 100644
--- a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -22,6 +22,14 @@ namespace llvm {
class DominatorTree;
class OrderedBasicBlock;
+ /// The default value for MaxUsesToExplore argument. It's relatively small to
+ /// keep the cost of analysis reasonable for clients like BasicAliasAnalysis,
+ /// where the results can't be cached.
+ /// TODO: we should probably introduce a caching CaptureTracking analysis and
+ /// use it where possible. The caching version can use much higher limit or
+ /// don't have this cap at all.
+ unsigned constexpr DefaultMaxUsesToExplore = 20;
+
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
@@ -29,9 +37,12 @@ namespace llvm {
/// counts as capturing it or not. The boolean StoreCaptures specified
/// whether storing the value (or part of it) into memory anywhere
/// automatically counts as capturing it or not.
+ /// MaxUsesToExplore specifies how many uses should the analysis explore for
+ /// one value before giving up due too "too many uses".
bool PointerMayBeCaptured(const Value *V,
bool ReturnCaptures,
- bool StoreCaptures);
+ bool StoreCaptures,
+ unsigned MaxUsesToExplore = DefaultMaxUsesToExplore);
/// PointerMayBeCapturedBefore - Return true if this pointer value may be
/// captured by the enclosing function (which is required to exist). If a
@@ -44,10 +55,13 @@ namespace llvm {
/// or not. Captures by the provided instruction are considered if the
/// final parameter is true. An ordered basic block in \p OBB could be used
/// to speed up capture-tracker queries.
+ /// MaxUsesToExplore specifies how many uses should the analysis explore for
+ /// one value before giving up due too "too many uses".
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
bool StoreCaptures, const Instruction *I,
const DominatorTree *DT, bool IncludeI = false,
- OrderedBasicBlock *OBB = nullptr);
+ OrderedBasicBlock *OBB = nullptr,
+ unsigned MaxUsesToExplore = DefaultMaxUsesToExplore);
/// This callback is used in conjunction with PointerMayBeCaptured. In
/// addition to the interface here, you'll need to provide your own getters
@@ -75,7 +89,10 @@ namespace llvm {
/// PointerMayBeCaptured - Visit the value and the values derived from it and
/// find values which appear to be capturing the pointer value. This feeds
/// results into and is controlled by the CaptureTracker object.
- void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker);
+ /// MaxUsesToExplore specifies how many uses should the analysis explore for
+ /// one value before giving up due too "too many uses".
+ void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
+ unsigned MaxUsesToExplore = DefaultMaxUsesToExplore);
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CmpInstAnalysis.h b/contrib/llvm/include/llvm/Analysis/CmpInstAnalysis.h
index 3cc69d9fea29..0e9c6a96b0f4 100644
--- a/contrib/llvm/include/llvm/Analysis/CmpInstAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/CmpInstAnalysis.h
@@ -46,19 +46,18 @@ namespace llvm {
///
unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
- /// This is the complement of getICmpCode, which turns an opcode and two
- /// operands into either a constant true or false, or the predicate for a new
- /// ICmp instruction. The sign is passed in to determine which kind of
- /// predicate to use in the new icmp instruction.
+ /// This is the complement of getICmpCode. It turns a predicate code into
+ /// either a constant true or false or the predicate for a new ICmp.
+ /// The sign is passed in to determine which kind of predicate to use in the
+ /// new ICmp instruction.
/// Non-NULL return value will be a true or false constant.
- /// NULL return means a new ICmp is needed. The predicate for which is output
- /// in NewICmpPred.
- Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
- CmpInst::Predicate &NewICmpPred);
+ /// NULL return means a new ICmp is needed. The predicate is output in Pred.
+ Constant *getPredForICmpCode(unsigned Code, bool Sign, Type *OpTy,
+ CmpInst::Predicate &Pred);
/// Return true if both predicates match sign or if at least one of them is an
/// equality comparison (which is signless).
- bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+ bool predicatesFoldable(CmpInst::Predicate P1, CmpInst::Predicate P2);
/// Decompose an icmp into the form ((X & Mask) pred 0) if possible. The
/// returned predicate is either == or !=. Returns false if decomposition
diff --git a/contrib/llvm/include/llvm/Analysis/DemandedBits.h b/contrib/llvm/include/llvm/Analysis/DemandedBits.h
index d4384609762d..4c4e3f6c99e7 100644
--- a/contrib/llvm/include/llvm/Analysis/DemandedBits.h
+++ b/contrib/llvm/include/llvm/Analysis/DemandedBits.h
@@ -44,19 +44,30 @@ public:
F(F), AC(AC), DT(DT) {}
/// Return the bits demanded from instruction I.
+ ///
+ /// For vector instructions individual vector elements are not distinguished:
+ /// A bit is demanded if it is demanded for any of the vector elements. The
+ /// size of the return value corresponds to the type size in bits of the
+ /// scalar type.
+ ///
+ /// Instructions that do not have integer or vector of integer type are
+ /// accepted, but will always produce a mask with all bits set.
APInt getDemandedBits(Instruction *I);
/// Return true if, during analysis, I could not be reached.
bool isInstructionDead(Instruction *I);
+ /// Return whether this use is dead by means of not having any demanded bits.
+ bool isUseDead(Use *U);
+
void print(raw_ostream &OS);
private:
void performAnalysis();
void determineLiveOperandBits(const Instruction *UserI,
- const Instruction *I, unsigned OperandNo,
+ const Value *Val, unsigned OperandNo,
const APInt &AOut, APInt &AB,
- KnownBits &Known, KnownBits &Known2);
+ KnownBits &Known, KnownBits &Known2, bool &KnownBitsComputed);
Function &F;
AssumptionCache &AC;
@@ -67,6 +78,9 @@ private:
// The set of visited instructions (non-integer-typed only).
SmallPtrSet<Instruction*, 32> Visited;
DenseMap<Instruction *, APInt> AliveBits;
+ // Uses with no demanded bits. If the user also has no demanded bits, the use
+ // might not be stored explicitly in this map, to save memory during analysis.
+ SmallPtrSet<Use *, 16> DeadUses;
};
class DemandedBitsWrapperPass : public FunctionPass {
diff --git a/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
index c8ec737a2cb9..69d0e2c1513e 100644
--- a/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -936,6 +936,17 @@ template <typename T> class ArrayRef;
friend struct AnalysisInfoMixin<DependenceAnalysis>;
}; // class DependenceAnalysis
+ /// Printer pass to dump DA results.
+ struct DependenceAnalysisPrinterPass
+ : public PassInfoMixin<DependenceAnalysisPrinterPass> {
+ DependenceAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+
+ private:
+ raw_ostream &OS;
+ }; // class DependenceAnalysisPrinterPass
+
/// Legacy pass manager pass to access dependence information
class DependenceAnalysisWrapperPass : public FunctionPass {
public:
diff --git a/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h
index 328c8645d3c0..d834862db095 100644
--- a/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h
@@ -7,55 +7,199 @@
//
//===----------------------------------------------------------------------===//
//
-// The divergence analysis is an LLVM pass which can be used to find out
-// if a branch instruction in a GPU program is divergent or not. It can help
-// branch optimizations such as jump threading and loop unswitching to make
-// better decisions.
+// \file
+// The divergence analysis determines which instructions and branches are
+// divergent given a set of divergent source instructions.
//
//===----------------------------------------------------------------------===//
+
#ifndef LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
#define LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
#include "llvm/ADT/DenseSet.h"
+#include "llvm/Analysis/SyncDependenceAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
+#include <vector>
namespace llvm {
+class Module;
class Value;
-class DivergenceAnalysis : public FunctionPass {
+class Instruction;
+class Loop;
+class raw_ostream;
+class TargetTransformInfo;
+
+/// \brief Generic divergence analysis for reducible CFGs.
+///
+/// This analysis propagates divergence in a data-parallel context from sources
+/// of divergence to all users. It requires reducible CFGs. All assignments
+/// should be in SSA form.
+class DivergenceAnalysis {
public:
- static char ID;
+ /// \brief This instance will analyze the whole function \p F or the loop \p
+ /// RegionLoop.
+ ///
+ /// \param RegionLoop if non-null the analysis is restricted to \p RegionLoop.
+ /// Otherwise the whole function is analyzed.
+ /// \param IsLCSSAForm whether the analysis may assume that the IR in the
+ /// region in in LCSSA form.
+ DivergenceAnalysis(const Function &F, const Loop *RegionLoop,
+ const DominatorTree &DT, const LoopInfo &LI,
+ SyncDependenceAnalysis &SDA, bool IsLCSSAForm);
- DivergenceAnalysis() : FunctionPass(ID) {
- initializeDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
- }
+ /// \brief The loop that defines the analyzed region (if any).
+ const Loop *getRegionLoop() const { return RegionLoop; }
+ const Function &getFunction() const { return F; }
- void getAnalysisUsage(AnalysisUsage &AU) const override;
+ /// \brief Whether \p BB is part of the region.
+ bool inRegion(const BasicBlock &BB) const;
+ /// \brief Whether \p I is part of the region.
+ bool inRegion(const Instruction &I) const;
- bool runOnFunction(Function &F) override;
+ /// \brief Mark \p UniVal as a value that is always uniform.
+ void addUniformOverride(const Value &UniVal);
- // Print all divergent branches in the function.
- void print(raw_ostream &OS, const Module *) const override;
+ /// \brief Mark \p DivVal as a value that is always divergent.
+ void markDivergent(const Value &DivVal);
- // Returns true if V is divergent at its definition.
- //
- // Even if this function returns false, V may still be divergent when used
- // in a different basic block.
- bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
+ /// \brief Propagate divergence to all instructions in the region.
+ /// Divergence is seeded by calls to \p markDivergent.
+ void compute();
+
+ /// \brief Whether any value was marked or analyzed to be divergent.
+ bool hasDetectedDivergence() const { return !DivergentValues.empty(); }
+
+ /// \brief Whether \p Val will always return a uniform value regardless of its
+ /// operands
+ bool isAlwaysUniform(const Value &Val) const;
+
+ /// \brief Whether \p Val is a divergent value
+ bool isDivergent(const Value &Val) const;
+
+ void print(raw_ostream &OS, const Module *) const;
+
+private:
+ bool updateTerminator(const Instruction &Term) const;
+ bool updatePHINode(const PHINode &Phi) const;
+
+ /// \brief Computes whether \p Inst is divergent based on the
+ /// divergence of its operands.
+ ///
+ /// \returns Whether \p Inst is divergent.
+ ///
+ /// This should only be called for non-phi, non-terminator instructions.
+ bool updateNormalInstruction(const Instruction &Inst) const;
+
+ /// \brief Mark users of live-out users as divergent.
+ ///
+ /// \param LoopHeader the header of the divergent loop.
+ ///
+ /// Marks all users of live-out values of the loop headed by \p LoopHeader
+ /// as divergent and puts them on the worklist.
+ void taintLoopLiveOuts(const BasicBlock &LoopHeader);
+
+ /// \brief Push all users of \p Val (in the region) to the worklist
+ void pushUsers(const Value &I);
+
+ /// \brief Push all phi nodes in @block to the worklist
+ void pushPHINodes(const BasicBlock &Block);
+
+ /// \brief Mark \p Block as join divergent
+ ///
+ /// A block is join divergent if two threads may reach it from different
+ /// incoming blocks at the same time.
+ void markBlockJoinDivergent(const BasicBlock &Block) {
+ DivergentJoinBlocks.insert(&Block);
+ }
+
+ /// \brief Whether \p Val is divergent when read in \p ObservingBlock.
+ bool isTemporalDivergent(const BasicBlock &ObservingBlock,
+ const Value &Val) const;
+
+ /// \brief Whether \p Block is join divergent
+ ///
+ /// (see markBlockJoinDivergent).
+ bool isJoinDivergent(const BasicBlock &Block) const {
+ return DivergentJoinBlocks.find(&Block) != DivergentJoinBlocks.end();
+ }
- // Returns true if V is uniform/non-divergent.
+ /// \brief Propagate control-induced divergence to users (phi nodes and
+ /// instructions).
//
- // Even if this function returns true, V may still be divergent when used
- // in a different basic block.
- bool isUniform(const Value *V) const { return !isDivergent(V); }
+ // \param JoinBlock is a divergent loop exit or join point of two disjoint
+ // paths.
+ // \returns Whether \p JoinBlock is a divergent loop exit of \p TermLoop.
+ bool propagateJoinDivergence(const BasicBlock &JoinBlock,
+ const Loop *TermLoop);
- // Keep the analysis results uptodate by removing an erased value.
- void removeValue(const Value *V) { DivergentValues.erase(V); }
+ /// \brief Propagate induced value divergence due to control divergence in \p
+ /// Term.
+ void propagateBranchDivergence(const Instruction &Term);
+
+ /// \brief Propagate divergent caused by a divergent loop exit.
+ ///
+ /// \param ExitingLoop is a divergent loop.
+ void propagateLoopDivergence(const Loop &ExitingLoop);
private:
- // Stores all divergent values.
+ const Function &F;
+ // If regionLoop != nullptr, analysis is only performed within \p RegionLoop.
+ // Otherwise, analyze the whole function.
+ const Loop *RegionLoop;
+
+ const DominatorTree &DT;
+ const LoopInfo &LI;
+
+ // Recognized divergent loops
+ DenseSet<const Loop *> DivergentLoops;
+
+ // The SDA links divergent branches to divergent control-flow joins.
+ SyncDependenceAnalysis &SDA;
+
+ // Use simplified code path for LCSSA form.
+ bool IsLCSSAForm;
+
+ // Set of known-uniform values.
+ DenseSet<const Value *> UniformOverrides;
+
+ // Blocks with joining divergent control from different predecessors.
+ DenseSet<const BasicBlock *> DivergentJoinBlocks;
+
+ // Detected/marked divergent values.
DenseSet<const Value *> DivergentValues;
+
+ // Internal worklist for divergence propagation.
+ std::vector<const Instruction *> Worklist;
};
-} // End llvm namespace
-#endif //LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H \ No newline at end of file
+/// \brief Divergence analysis frontend for GPU kernels.
+class GPUDivergenceAnalysis {
+ SyncDependenceAnalysis SDA;
+ DivergenceAnalysis DA;
+
+public:
+ /// Runs the divergence analysis on @F, a GPU kernel
+ GPUDivergenceAnalysis(Function &F, const DominatorTree &DT,
+ const PostDominatorTree &PDT, const LoopInfo &LI,
+ const TargetTransformInfo &TTI);
+
+ /// Whether any divergence was detected.
+ bool hasDivergence() const { return DA.hasDetectedDivergence(); }
+
+ /// The GPU kernel this analysis result is for
+ const Function &getFunction() const { return DA.getFunction(); }
+
+ /// Whether \p V is divergent.
+ bool isDivergent(const Value &V) const;
+
+ /// Whether \p V is uniform/non-divergent
+ bool isUniform(const Value &V) const { return !isDivergent(V); }
+
+ /// Print all divergent values in the kernel.
+ void print(raw_ostream &OS, const Module *) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
diff --git a/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h b/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h
index 09cef68ce70f..3a664ca6ef50 100644
--- a/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h
+++ b/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -88,7 +88,7 @@ public:
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
using AAResultBase::getModRefInfo;
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
@@ -98,7 +98,7 @@ public:
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
private:
FunctionInfo *getFunctionInfo(const Function *F);
@@ -113,7 +113,7 @@ private:
void CollectSCCMembership(CallGraph &CG);
bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
- ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS,
+ ModRefInfo getModRefInfoForArgument(const CallBase *Call,
const GlobalValue *GV);
};
diff --git a/contrib/llvm/include/llvm/Analysis/GuardUtils.h b/contrib/llvm/include/llvm/Analysis/GuardUtils.h
new file mode 100644
index 000000000000..3b151eeafc81
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/GuardUtils.h
@@ -0,0 +1,26 @@
+//===-- GuardUtils.h - Utils for work with guards ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Utils that are used to perform analyses related to guards and their
+// conditions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_GUARDUTILS_H
+#define LLVM_ANALYSIS_GUARDUTILS_H
+
+namespace llvm {
+
+class User;
+
+/// Returns true iff \p U has semantics of a guard.
+bool isGuard(const User *U);
+
+} // llvm
+
+#endif // LLVM_ANALYSIS_GUARDUTILS_H
+
diff --git a/contrib/llvm/include/llvm/Analysis/IVDescriptors.h b/contrib/llvm/include/llvm/Analysis/IVDescriptors.h
new file mode 100644
index 000000000000..64b4ae23cc59
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IVDescriptors.h
@@ -0,0 +1,357 @@
+//===- llvm/Analysis/IVDescriptors.h - IndVar Descriptors -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file "describes" induction and recurrence variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IVDESCRIPTORS_H
+#define LLVM_ANALYSIS_IVDESCRIPTORS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DemandedBits.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+class AliasSet;
+class AliasSetTracker;
+class BasicBlock;
+class DataLayout;
+class Loop;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PredicatedScalarEvolution;
+class PredIteratorCache;
+class ScalarEvolution;
+class SCEV;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The RecurrenceDescriptor is used to identify recurrences variables in a
+/// loop. Reduction is a special case of recurrence that has uses of the
+/// recurrence variable outside the loop. The method isReductionPHI identifies
+/// reductions that are basic recurrences.
+///
+/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
+/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
+/// array[i]; } is a summation of array elements. Basic recurrences are a
+/// special case of chains of recurrences (CR). See ScalarEvolution for CR
+/// references.
+
+/// This struct holds information about recurrence variables.
+class RecurrenceDescriptor {
+public:
+ /// This enum represents the kinds of recurrences that we support.
+ enum RecurrenceKind {
+ RK_NoRecurrence, ///< Not a recurrence.
+ RK_IntegerAdd, ///< Sum of integers.
+ RK_IntegerMult, ///< Product of integers.
+ RK_IntegerOr, ///< Bitwise or logical OR of numbers.
+ RK_IntegerAnd, ///< Bitwise or logical AND of numbers.
+ RK_IntegerXor, ///< Bitwise or logical XOR of numbers.
+ RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
+ RK_FloatAdd, ///< Sum of floats.
+ RK_FloatMult, ///< Product of floats.
+ RK_FloatMinMax ///< Min/max implemented in terms of select(cmp()).
+ };
+
+ // This enum represents the kind of minmax recurrence.
+ enum MinMaxRecurrenceKind {
+ MRK_Invalid,
+ MRK_UIntMin,
+ MRK_UIntMax,
+ MRK_SIntMin,
+ MRK_SIntMax,
+ MRK_FloatMin,
+ MRK_FloatMax
+ };
+
+ RecurrenceDescriptor() = default;
+
+ RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
+ MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
+ bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+ : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
+ UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+ CastInsts.insert(CI.begin(), CI.end());
+ }
+
+ /// This POD struct holds information about a potential recurrence operation.
+ class InstDesc {
+ public:
+ InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
+ : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
+ UnsafeAlgebraInst(UAI) {}
+
+ InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
+ : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
+ UnsafeAlgebraInst(UAI) {}
+
+ bool isRecurrence() { return IsRecurrence; }
+
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
+
+ Instruction *getPatternInst() { return PatternLastInst; }
+
+ private:
+ // Is this instruction a recurrence candidate.
+ bool IsRecurrence;
+ // The last instruction in a min/max pattern (select of the select(icmp())
+ // pattern), or the current recurrence instruction otherwise.
+ Instruction *PatternLastInst;
+ // If this is a min/max pattern the comparison predicate.
+ MinMaxRecurrenceKind MinMaxKind;
+ // Recurrence has unsafe algebra.
+ Instruction *UnsafeAlgebraInst;
+ };
+
+ /// Returns a struct describing if the instruction 'I' can be a recurrence
+ /// variable of type 'Kind'. If the recurrence is a min/max pattern of
+ /// select(icmp()) this function advances the instruction pointer 'I' from the
+ /// compare instruction to the select instruction and stores this pointer in
+ /// 'PatternLastInst' member of the returned struct.
+ static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+ InstDesc &Prev, bool HasFunNoNaNAttr);
+
+ /// Returns true if instruction I has multiple uses in Insts
+ static bool hasMultipleUsesOf(Instruction *I,
+ SmallPtrSetImpl<Instruction *> &Insts,
+ unsigned MaxNumUses);
+
+ /// Returns true if all uses of the instruction I are within the Set.
+ static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+
+ /// Returns a struct describing if the instruction is a
+ /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
+ /// or max(X, Y).
+ static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
+
+ /// Returns a struct describing if the instruction is a
+ /// Select(FCmp(X, Y), (Z = X op PHINode), PHINode) instruction pattern.
+ static InstDesc isConditionalRdxPattern(RecurrenceKind Kind, Instruction *I);
+
+ /// Returns identity corresponding to the RecurrenceKind.
+ static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
+
+ /// Returns the opcode of binary operation corresponding to the
+ /// RecurrenceKind.
+ static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
+
+ /// Returns true if Phi is a reduction of type Kind and adds it to the
+ /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
+ /// non-null, the minimal bit width needed to compute the reduction will be
+ /// computed.
+ static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
+ bool HasFunNoNaNAttr,
+ RecurrenceDescriptor &RedDes,
+ DemandedBits *DB = nullptr,
+ AssumptionCache *AC = nullptr,
+ DominatorTree *DT = nullptr);
+
+ /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor
+ /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
+ /// non-null, the minimal bit width needed to compute the reduction will be
+ /// computed.
+ static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
+ RecurrenceDescriptor &RedDes,
+ DemandedBits *DB = nullptr,
+ AssumptionCache *AC = nullptr,
+ DominatorTree *DT = nullptr);
+
+ /// Returns true if Phi is a first-order recurrence. A first-order recurrence
+ /// is a non-reduction recurrence relation in which the value of the
+ /// recurrence in the current loop iteration equals a value defined in the
+ /// previous iteration. \p SinkAfter includes pairs of instructions where the
+ /// first will be rescheduled to appear after the second if/when the loop is
+ /// vectorized. It may be augmented with additional pairs if needed in order
+ /// to handle Phi as a first-order recurrence.
+ static bool
+ isFirstOrderRecurrence(PHINode *Phi, Loop *TheLoop,
+ DenseMap<Instruction *, Instruction *> &SinkAfter,
+ DominatorTree *DT);
+
+ RecurrenceKind getRecurrenceKind() { return Kind; }
+
+ MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
+
+ TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
+
+ Instruction *getLoopExitInstr() { return LoopExitInstr; }
+
+ /// Returns true if the recurrence has unsafe algebra which requires a relaxed
+ /// floating-point model.
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ /// Returns first unsafe algebra instruction in the PHI node's use-chain.
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ /// Returns true if the recurrence kind is an integer kind.
+ static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is a floating point kind.
+ static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is an arithmetic kind.
+ static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns the type of the recurrence. This type can be narrower than the
+ /// actual type of the Phi if the recurrence has been type-promoted.
+ Type *getRecurrenceType() { return RecurrenceType; }
+
+ /// Returns a reference to the instructions used for type-promoting the
+ /// recurrence.
+ SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
+
+ /// Returns true if all source operands of the recurrence are SExtInsts.
+ bool isSigned() { return IsSigned; }
+
+private:
+ // The starting value of the recurrence.
+ // It does not have to be zero!
+ TrackingVH<Value> StartValue;
+ // The instruction whose value is used outside the loop.
+ Instruction *LoopExitInstr = nullptr;
+ // The kind of the recurrence.
+ RecurrenceKind Kind = RK_NoRecurrence;
+ // If this a min/max recurrence the kind of recurrence.
+ MinMaxRecurrenceKind MinMaxKind = MRK_Invalid;
+ // First occurrence of unsafe algebra in the PHI's use-chain.
+ Instruction *UnsafeAlgebraInst = nullptr;
+ // The type of the recurrence.
+ Type *RecurrenceType = nullptr;
+ // True if all source operands of the recurrence are SExtInsts.
+ bool IsSigned = false;
+ // Instructions used for type-promoting the recurrence.
+ SmallPtrSet<Instruction *, 8> CastInsts;
+};
+
+/// A struct for saving information about induction variables.
+class InductionDescriptor {
+public:
+ /// This enum represents the kinds of inductions that we support.
+ enum InductionKind {
+ IK_NoInduction, ///< Not an induction variable.
+ IK_IntInduction, ///< Integer induction variable. Step = C.
+ IK_PtrInduction, ///< Pointer induction var. Step = C / sizeof(elem).
+ IK_FpInduction ///< Floating point induction variable.
+ };
+
+public:
+ /// Default constructor - creates an invalid induction.
+ InductionDescriptor() = default;
+
+ /// Get the consecutive direction. Returns:
+ /// 0 - unknown or non-consecutive.
+ /// 1 - consecutive and increasing.
+ /// -1 - consecutive and decreasing.
+ int getConsecutiveDirection() const;
+
+ Value *getStartValue() const { return StartValue; }
+ InductionKind getKind() const { return IK; }
+ const SCEV *getStep() const { return Step; }
+ BinaryOperator *getInductionBinOp() const { return InductionBinOp; }
+ ConstantInt *getConstIntStepValue() const;
+
+ /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
+ /// induction, the induction descriptor \p D will contain the data describing
+ /// this induction. If by some other means the caller has a better SCEV
+ /// expression for \p Phi than the one returned by the ScalarEvolution
+ /// analysis, it can be passed through \p Expr. If the def-use chain
+ /// associated with the phi includes casts (that we know we can ignore
+ /// under proper runtime checks), they are passed through \p CastsToIgnore.
+ static bool
+ isInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
+ InductionDescriptor &D, const SCEV *Expr = nullptr,
+ SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);
+
+ /// Returns true if \p Phi is a floating point induction in the loop \p L.
+ /// If \p Phi is an induction, the induction descriptor \p D will contain
+ /// the data describing this induction.
+ static bool isFPInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
+ InductionDescriptor &D);
+
+ /// Returns true if \p Phi is a loop \p L induction, in the context associated
+ /// with the run-time predicate of PSE. If \p Assume is true, this can add
+ /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
+ /// induction.
+ /// If \p Phi is an induction, \p D will contain the data describing this
+ /// induction.
+ static bool isInductionPHI(PHINode *Phi, const Loop *L,
+ PredicatedScalarEvolution &PSE,
+ InductionDescriptor &D, bool Assume = false);
+
+ /// Returns true if the induction type is FP and the binary operator does
+ /// not have the "fast-math" property. Such operation requires a relaxed FP
+ /// mode.
+ bool hasUnsafeAlgebra() {
+ return InductionBinOp && !cast<FPMathOperator>(InductionBinOp)->isFast();
+ }
+
+ /// Returns induction operator that does not have "fast-math" property
+ /// and requires FP unsafe mode.
+ Instruction *getUnsafeAlgebraInst() {
+ if (!InductionBinOp || cast<FPMathOperator>(InductionBinOp)->isFast())
+ return nullptr;
+ return InductionBinOp;
+ }
+
+ /// Returns binary opcode of the induction operator.
+ Instruction::BinaryOps getInductionOpcode() const {
+ return InductionBinOp ? InductionBinOp->getOpcode()
+ : Instruction::BinaryOpsEnd;
+ }
+
+ /// Returns a reference to the type cast instructions in the induction
+ /// update chain, that are redundant when guarded with a runtime
+ /// SCEV overflow check.
+ const SmallVectorImpl<Instruction *> &getCastInsts() const {
+ return RedundantCasts;
+ }
+
+private:
+ /// Private constructor - used by \c isInductionPHI.
+ InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
+ BinaryOperator *InductionBinOp = nullptr,
+ SmallVectorImpl<Instruction *> *Casts = nullptr);
+
+ /// Start value.
+ TrackingVH<Value> StartValue;
+ /// Induction kind.
+ InductionKind IK = IK_NoInduction;
+ /// Step value.
+ const SCEV *Step = nullptr;
+ // Instruction that advances induction variable.
+ BinaryOperator *InductionBinOp = nullptr;
+ // Instructions used for type-casts of the induction variable,
+ // that are redundant when guarded with a runtime SCEV overflow check.
+ SmallVector<Instruction *, 2> RedundantCasts;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_IVDESCRIPTORS_H
diff --git a/contrib/llvm/include/llvm/Analysis/IndirectCallSiteVisitor.h b/contrib/llvm/include/llvm/Analysis/IndirectCallSiteVisitor.h
deleted file mode 100644
index dde56a143c51..000000000000
--- a/contrib/llvm/include/llvm/Analysis/IndirectCallSiteVisitor.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//===-- IndirectCallSiteVisitor.h - indirect call-sites visitor -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements defines a visitor class and a helper function that find
-// all indirect call-sites in a function.
-
-#include "llvm/IR/InstVisitor.h"
-#include <vector>
-
-namespace llvm {
-// Visitor class that finds all indirect call sites.
-struct PGOIndirectCallSiteVisitor
- : public InstVisitor<PGOIndirectCallSiteVisitor> {
- std::vector<Instruction *> IndirectCallInsts;
- PGOIndirectCallSiteVisitor() {}
-
- void visitCallSite(CallSite CS) {
- if (CS.isIndirectCall())
- IndirectCallInsts.push_back(CS.getInstruction());
- }
-};
-
-// Helper function that finds all indirect call sites.
-inline std::vector<Instruction *> findIndirectCallSites(Function &F) {
- PGOIndirectCallSiteVisitor ICV;
- ICV.visit(F);
- return ICV.IndirectCallInsts;
-}
-}
diff --git a/contrib/llvm/include/llvm/Analysis/IndirectCallVisitor.h b/contrib/llvm/include/llvm/Analysis/IndirectCallVisitor.h
new file mode 100644
index 000000000000..d00cf63368f1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IndirectCallVisitor.h
@@ -0,0 +1,39 @@
+//===-- IndirectCallVisitor.h - indirect call visitor ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a visitor class and a helper function that find
+// all indirect call-sites in a function.
+
+#ifndef LLVM_ANALYSIS_INDIRECTCALLVISITOR_H
+#define LLVM_ANALYSIS_INDIRECTCALLVISITOR_H
+
+#include "llvm/IR/InstVisitor.h"
+#include <vector>
+
+namespace llvm {
+// Visitor class that finds all indirect calls.
+struct PGOIndirectCallVisitor : public InstVisitor<PGOIndirectCallVisitor> {
+ std::vector<Instruction *> IndirectCalls;
+ PGOIndirectCallVisitor() {}
+
+ void visitCallBase(CallBase &Call) {
+ if (Call.isIndirectCall())
+ IndirectCalls.push_back(&Call);
+ }
+};
+
+// Helper function that finds all indirect call sites.
+inline std::vector<Instruction *> findIndirectCalls(Function &F) {
+ PGOIndirectCallVisitor ICV;
+ ICV.visit(F);
+ return ICV.IndirectCalls;
+}
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index 8c412057fb81..4c270354b0c4 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -46,7 +46,6 @@ const int IndirectCallThreshold = 100;
const int CallPenalty = 25;
const int LastCallToStaticBonus = 15000;
const int ColdccPenalty = 2000;
-const int NoreturnPenalty = 10000;
/// Do not inline functions which allocate this many bytes on the stack
/// when the caller is recursive.
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
@@ -74,8 +73,15 @@ class InlineCost {
/// The adjusted threshold against which this cost was computed.
const int Threshold;
+ /// Must be set for Always and Never instances.
+ const char *Reason = nullptr;
+
// Trivial constructor, interesting logic in the factory functions below.
- InlineCost(int Cost, int Threshold) : Cost(Cost), Threshold(Threshold) {}
+ InlineCost(int Cost, int Threshold, const char *Reason = nullptr)
+ : Cost(Cost), Threshold(Threshold), Reason(Reason) {
+ assert((isVariable() || Reason) &&
+ "Reason must be provided for Never or Always");
+ }
public:
static InlineCost get(int Cost, int Threshold) {
@@ -83,11 +89,11 @@ public:
assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
return InlineCost(Cost, Threshold);
}
- static InlineCost getAlways() {
- return InlineCost(AlwaysInlineCost, 0);
+ static InlineCost getAlways(const char *Reason) {
+ return InlineCost(AlwaysInlineCost, 0, Reason);
}
- static InlineCost getNever() {
- return InlineCost(NeverInlineCost, 0);
+ static InlineCost getNever(const char *Reason) {
+ return InlineCost(NeverInlineCost, 0, Reason);
}
/// Test whether the inline cost is low enough for inlining.
@@ -112,12 +118,30 @@ public:
return Threshold;
}
+ /// Get the reason of Always or Never.
+ const char *getReason() const {
+ assert((Reason || isVariable()) &&
+ "InlineCost reason must be set for Always or Never");
+ return Reason;
+ }
+
/// Get the cost delta from the threshold for inlining.
/// Only valid if the cost is of the variable kind. Returns a negative
/// value if the cost is too high to inline.
int getCostDelta() const { return Threshold - getCost(); }
};
+/// InlineResult is basically true or false. For false results the message
+/// describes a reason why it is decided not to inline.
+struct InlineResult {
+ const char *message = nullptr;
+ InlineResult(bool result, const char *message = nullptr)
+ : message(result ? nullptr : (message ? message : "cost > threshold")) {}
+ InlineResult(const char *message = nullptr) : message(message) {}
+ operator bool() const { return !message; }
+ operator const char *() const { return message; }
+};
+
/// Thresholds to tune inline cost analysis. The inline cost analysis decides
/// the condition to apply a threshold and applies it. Otherwise,
/// DefaultThreshold is used. If a threshold is Optional, it is applied only
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h b/contrib/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
new file mode 100644
index 000000000000..073e6ec3b7f6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
@@ -0,0 +1,150 @@
+//===-- InstructionPrecedenceTracking.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Implements a class that is able to define some instructions as "special"
+// (e.g. as having implicit control flow, or writing memory, or having another
+// interesting property) and then efficiently answers queries of the types:
+// 1. Are there any special instructions in the block of interest?
+// 2. Return first of the special instructions in the given block;
+// 3. Check if the given instruction is preceded by the first special
+// instruction in the same block.
+// The class provides caching that allows answering these queries quickly. The
+// user must make sure that the cached data is invalidated properly whenever
+// a content of some tracked block is changed.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
+#define LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
+
+#include "llvm/IR/Dominators.h"
+#include "llvm/Analysis/OrderedInstructions.h"
+
+namespace llvm {
+
+class InstructionPrecedenceTracking {
+ // Maps a block to the topmost special instruction in it. If the value is
+ // nullptr, it means that it is known that this block does not contain any
+ // special instructions.
+ DenseMap<const BasicBlock *, const Instruction *> FirstSpecialInsts;
+ // Allows to answer queries about precedence of instructions within one block.
+ OrderedInstructions OI;
+
+ // Fills information about the given block's special instructions.
+ void fill(const BasicBlock *BB);
+
+#ifndef NDEBUG
+ /// Asserts that the cached info for \p BB is up-to-date. This helps to catch
+ /// the usage error of accessing a block without properly invalidating after a
+ /// previous transform.
+ void validate(const BasicBlock *BB) const;
+
+ /// Asserts whether or not the contents of this tracking is up-to-date. This
+ /// helps to catch the usage error of accessing a block without properly
+ /// invalidating after a previous transform.
+ void validateAll() const;
+#endif
+
+protected:
+ InstructionPrecedenceTracking(DominatorTree *DT)
+ : OI(OrderedInstructions(DT)) {}
+
+ /// Returns the topmost special instruction from the block \p BB. Returns
+ /// nullptr if there is no special instructions in the block.
+ const Instruction *getFirstSpecialInstruction(const BasicBlock *BB);
+
+ /// Returns true iff at least one instruction from the basic block \p BB is
+ /// special.
+ bool hasSpecialInstructions(const BasicBlock *BB);
+
+ /// Returns true iff the first special instruction of \p Insn's block exists
+ /// and dominates \p Insn.
+ bool isPreceededBySpecialInstruction(const Instruction *Insn);
+
+ /// A predicate that defines whether or not the instruction \p Insn is
+ /// considered special and needs to be tracked. Implementing this method in
+ /// children classes allows to implement tracking of implicit control flow,
+ /// memory writing instructions or any other kinds of instructions we might
+ /// be interested in.
+ virtual bool isSpecialInstruction(const Instruction *Insn) const = 0;
+
+ virtual ~InstructionPrecedenceTracking() = default;
+
+public:
+ /// Notifies this tracking that we are going to insert a new instruction \p
+ /// Inst to the basic block \p BB. It makes all necessary updates to internal
+ /// caches to keep them consistent.
+ void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
+
+ /// Notifies this tracking that we are going to remove the instruction \p Inst
+ /// It makes all necessary updates to internal caches to keep them consistent.
+ void removeInstruction(const Instruction *Inst);
+
+ /// Invalidates all information from this tracking.
+ void clear();
+};
+
+/// This class allows to keep track on instructions with implicit control flow.
+/// These are instructions that may not pass execution to their successors. For
+/// example, throwing calls and guards do not always do this. If we need to know
+/// for sure that some instruction is guaranteed to execute if the given block
+/// is reached, then we need to make sure that there is no implicit control flow
+/// instruction (ICFI) preceding it. For example, this check is required if we
+/// perform PRE when moving a non-speculatable instruction to another place.
+class ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
+public:
+ ImplicitControlFlowTracking(DominatorTree *DT)
+ : InstructionPrecedenceTracking(DT) {}
+
+ /// Returns the topmost instruction with implicit control flow from the given
+ /// basic block. Returns nullptr if there is no such instruction in the block.
+ const Instruction *getFirstICFI(const BasicBlock *BB) {
+ return getFirstSpecialInstruction(BB);
+ }
+
+ /// Returns true if at least one instruction from the given basic block has
+ /// implicit control flow.
+ bool hasICF(const BasicBlock *BB) {
+ return hasSpecialInstructions(BB);
+ }
+
+ /// Returns true if the first ICFI of Insn's block exists and dominates Insn.
+ bool isDominatedByICFIFromSameBlock(const Instruction *Insn) {
+ return isPreceededBySpecialInstruction(Insn);
+ }
+
+ virtual bool isSpecialInstruction(const Instruction *Insn) const;
+};
+
+class MemoryWriteTracking : public InstructionPrecedenceTracking {
+public:
+ MemoryWriteTracking(DominatorTree *DT) : InstructionPrecedenceTracking(DT) {}
+
+ /// Returns the topmost instruction that may write memory from the given
+ /// basic block. Returns nullptr if there is no such instruction in the block.
+ const Instruction *getFirstMemoryWrite(const BasicBlock *BB) {
+ return getFirstSpecialInstruction(BB);
+ }
+
+ /// Returns true if at least one instruction from the given basic block may
+ /// write memory.
+ bool mayWriteToMemory(const BasicBlock *BB) {
+ return hasSpecialInstructions(BB);
+ }
+
+ /// Returns true if the first memory writing instruction of Insn's block
+ /// exists and dominates Insn.
+ bool isDominatedByMemoryWriteFromSameBlock(const Instruction *Insn) {
+ return isPreceededBySpecialInstruction(Insn);
+ }
+
+ virtual bool isSpecialInstruction(const Instruction *Insn) const;
+};
+
+} // llvm
+
+#endif // LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
index 4f896bddff87..6662e91037e1 100644
--- a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -32,6 +32,8 @@
#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
namespace llvm {
@@ -40,7 +42,6 @@ template <typename T, typename... TArgs> class AnalysisManager;
template <class T> class ArrayRef;
class AssumptionCache;
class DominatorTree;
-class Instruction;
class ImmutableCallSite;
class DataLayout;
class FastMathFlags;
@@ -50,6 +51,41 @@ class Pass;
class TargetLibraryInfo;
class Type;
class Value;
+class MDNode;
+class BinaryOperator;
+
+/// InstrInfoQuery provides an interface to query additional information for
+/// instructions like metadata or keywords like nsw, which provides conservative
+/// results if the users specified it is safe to use.
+struct InstrInfoQuery {
+ InstrInfoQuery(bool UMD) : UseInstrInfo(UMD) {}
+ InstrInfoQuery() : UseInstrInfo(true) {}
+ bool UseInstrInfo = true;
+
+ MDNode *getMetadata(const Instruction *I, unsigned KindID) const {
+ if (UseInstrInfo)
+ return I->getMetadata(KindID);
+ return nullptr;
+ }
+
+ template <class InstT> bool hasNoUnsignedWrap(const InstT *Op) const {
+ if (UseInstrInfo)
+ return Op->hasNoUnsignedWrap();
+ return false;
+ }
+
+ template <class InstT> bool hasNoSignedWrap(const InstT *Op) const {
+ if (UseInstrInfo)
+ return Op->hasNoSignedWrap();
+ return false;
+ }
+
+ bool isExact(const BinaryOperator *Op) const {
+ if (UseInstrInfo && isa<PossiblyExactOperator>(Op))
+ return cast<PossiblyExactOperator>(Op)->isExact();
+ return false;
+ }
+};
struct SimplifyQuery {
const DataLayout &DL;
@@ -58,14 +94,19 @@ struct SimplifyQuery {
AssumptionCache *AC = nullptr;
const Instruction *CxtI = nullptr;
+ // Wrapper to query additional information for instructions like metadata or
+ // keywords like nsw, which provides conservative results if those cannot
+ // be safely used.
+ const InstrInfoQuery IIQ;
+
SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
: DL(DL), CxtI(CXTI) {}
SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
- const Instruction *CXTI = nullptr)
- : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
+ const Instruction *CXTI = nullptr, bool UseInstrInfo = true)
+ : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI), IIQ(UseInstrInfo) {}
SimplifyQuery getWithInstruction(Instruction *I) const {
SimplifyQuery Copy(*this);
Copy.CxtI = I;
diff --git a/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h b/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h
index 6b1950733246..3083db75b81c 100644
--- a/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h
+++ b/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -6,7 +6,7 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
+/// \file
/// Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
@@ -28,6 +28,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFGDiff.h"
#include "llvm/IR/Dominators.h"
namespace llvm {
@@ -45,17 +46,21 @@ namespace llvm {
template <class NodeTy, bool IsPostDom>
class IDFCalculator {
public:
- IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
- : DT(DT), useLiveIn(false) {}
+ IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
+ : DT(DT), GD(nullptr), useLiveIn(false) {}
- /// Give the IDF calculator the set of blocks in which the value is
- /// defined. This is equivalent to the set of starting blocks it should be
- /// calculating the IDF for (though later gets pruned based on liveness).
- ///
- /// Note: This set *must* live for the entire lifetime of the IDF calculator.
- void setDefiningBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
- DefBlocks = &Blocks;
- }
+ IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT,
+ const GraphDiff<BasicBlock *, IsPostDom> *GD)
+ : DT(DT), GD(GD), useLiveIn(false) {}
+
+ /// Give the IDF calculator the set of blocks in which the value is
+ /// defined. This is equivalent to the set of starting blocks it should be
+ /// calculating the IDF for (though later gets pruned based on liveness).
+ ///
+ /// Note: This set *must* live for the entire lifetime of the IDF calculator.
+ void setDefiningBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
+ DefBlocks = &Blocks;
+ }
/// Give the IDF calculator the set of blocks in which the value is
/// live on entry to the block. This is used to prune the IDF calculation to
@@ -85,6 +90,7 @@ class IDFCalculator {
private:
DominatorTreeBase<BasicBlock, IsPostDom> &DT;
+ const GraphDiff<BasicBlock *, IsPostDom> *GD;
bool useLiveIn;
const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
diff --git a/contrib/llvm/include/llvm/Analysis/LegacyDivergenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/LegacyDivergenceAnalysis.h
new file mode 100644
index 000000000000..fc426ad7fb64
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LegacyDivergenceAnalysis.h
@@ -0,0 +1,69 @@
+//===- llvm/Analysis/LegacyDivergenceAnalysis.h - KernelDivergence Analysis -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The kernel divergence analysis is an LLVM pass which can be used to find out
+// if a branch instruction in a GPU program (kernel) is divergent or not. It can help
+// branch optimizations such as jump threading and loop unswitching to make
+// better decisions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H
+#define LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+
+namespace llvm {
+class Value;
+class GPUDivergenceAnalysis;
+class LegacyDivergenceAnalysis : public FunctionPass {
+public:
+ static char ID;
+
+ LegacyDivergenceAnalysis() : FunctionPass(ID) {
+ initializeLegacyDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+
+ // Print all divergent branches in the function.
+ void print(raw_ostream &OS, const Module *) const override;
+
+ // Returns true if V is divergent at its definition.
+ //
+ // Even if this function returns false, V may still be divergent when used
+ // in a different basic block.
+ bool isDivergent(const Value *V) const;
+
+ // Returns true if V is uniform/non-divergent.
+ //
+ // Even if this function returns true, V may still be divergent when used
+ // in a different basic block.
+ bool isUniform(const Value *V) const { return !isDivergent(V); }
+
+ // Keep the analysis results uptodate by removing an erased value.
+ void removeValue(const Value *V) { DivergentValues.erase(V); }
+
+private:
+ // Whether analysis should be performed by GPUDivergenceAnalysis.
+ bool shouldUseGPUDivergenceAnalysis(const Function &F) const;
+
+ // (optional) handle to new DivergenceAnalysis
+ std::unique_ptr<GPUDivergenceAnalysis> gpuDA;
+
+ // Stores all divergent values.
+ DenseSet<const Value *> DivergentValues;
+};
+} // End llvm namespace
+
+#endif //LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H
diff --git a/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index d27b3e42bbeb..4ed00e207753 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -97,6 +97,19 @@ public:
/// Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
+ /// Type to keep track of the status of the dependence check. The order of
+ /// the elements is important and has to be from most permissive to least
+ /// permissive.
+ enum class VectorizationSafetyStatus {
+ // Can vectorize safely without RT checks. All dependences are known to be
+ // safe.
+ Safe,
+ // Can possibly vectorize with RT checks to overcome unknown dependencies.
+ PossiblySafeWithRtChecks,
+ // Cannot vectorize due to known unsafe dependencies.
+ Unsafe,
+ };
+
/// Dependece between memory access instructions.
struct Dependence {
/// The type of the dependence.
@@ -146,7 +159,7 @@ public:
Instruction *getDestination(const LoopAccessInfo &LAI) const;
/// Dependence types that don't prevent vectorization.
- static bool isSafeForVectorization(DepType Type);
+ static VectorizationSafetyStatus isSafeForVectorization(DepType Type);
/// Lexically forward dependence.
bool isForward() const;
@@ -164,8 +177,8 @@ public:
MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
: PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeRegisterWidth(-1U),
- ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
- RecordDependences(true) {}
+ FoundNonConstantDistanceDependence(false),
+ Status(VectorizationSafetyStatus::Safe), RecordDependences(true) {}
/// Register the location (instructions are given increasing numbers)
/// of a write access.
@@ -193,7 +206,9 @@ public:
/// No memory dependence was encountered that would inhibit
/// vectorization.
- bool isSafeForVectorization() const { return SafeForVectorization; }
+ bool isSafeForVectorization() const {
+ return Status == VectorizationSafetyStatus::Safe;
+ }
/// The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
@@ -205,7 +220,10 @@ public:
/// In same cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
- bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
+ bool shouldRetryWithRuntimeCheck() const {
+ return FoundNonConstantDistanceDependence &&
+ Status == VectorizationSafetyStatus::PossiblySafeWithRtChecks;
+ }
/// Returns the memory dependences. If null is returned we exceeded
/// the MaxDependences threshold and this information is not
@@ -267,11 +285,12 @@ private:
/// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
- bool ShouldRetryWithRuntimeCheck;
+ bool FoundNonConstantDistanceDependence;
- /// No memory dependence was encountered that would inhibit
- /// vectorization.
- bool SafeForVectorization;
+ /// Result of the dependence checks, indicating whether the checked
+ /// dependences are safe for vectorization, require RT checks or are known to
+ /// be unsafe.
+ VectorizationSafetyStatus Status;
//// True if Dependences reflects the dependences in the
//// loop. If false we exceeded MaxDependences and
@@ -304,6 +323,11 @@ private:
/// \return false if we shouldn't vectorize at all or avoid larger
/// vectorization factors by limiting MaxSafeDepDistBytes.
bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
+
+ /// Updates the current safety status with \p S. We can go from Safe to
+ /// either PossiblySafeWithRtChecks or Unsafe and from
+ /// PossiblySafeWithRtChecks to Unsafe.
+ void mergeInStatus(VectorizationSafetyStatus S);
};
/// Holds information about the memory runtime legality checks to verify
@@ -564,11 +588,10 @@ public:
/// Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
- /// Checks existence of store to invariant address inside loop.
- /// If the loop has any store to invariant address, then it returns true,
- /// else returns false.
- bool hasStoreToLoopInvariantAddress() const {
- return StoreToLoopInvariantAddress;
+ /// If the loop has memory dependence involving an invariant address, i.e. two
+ /// stores or a store and a load, then return true, else return false.
+ bool hasDependenceInvolvingLoopInvariantAddress() const {
+ return HasDependenceInvolvingLoopInvariantAddress;
}
/// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
@@ -621,9 +644,8 @@ private:
/// Cache the result of analyzeLoop.
bool CanVecMem;
- /// Indicator for storing to uniform addresses.
- /// If a loop has write to a loop invariant address then it should be true.
- bool StoreToLoopInvariantAddress;
+ /// Indicator that there are non vectorizable stores to a uniform address.
+ bool HasDependenceInvolvingLoopInvariantAddress;
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 30b29d66a1d1..72873546a068 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -408,6 +408,12 @@ public:
/// Verify loop structure of this loop and all nested loops.
void verifyLoopNest(DenseSet<const LoopT *> *Loops) const;
+ /// Returns true if the loop is annotated parallel.
+ ///
+ /// Derived classes can override this method using static template
+ /// polymorphism.
+ bool isAnnotatedParallel() const { return false; }
+
/// Print loop with all the BBs inside it.
void print(raw_ostream &OS, unsigned Depth = 0, bool Verbose = false) const;
@@ -989,6 +995,26 @@ public:
/// Function to print a loop's contents as LLVM's text IR assembly.
void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
+/// Find and return the loop attribute node for the attribute @p Name in
+/// @p LoopID. Return nullptr if there is no such attribute.
+MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);
+
+/// Find string metadata for a loop.
+///
+/// Returns the MDNode where the first operand is the metadata's name. The
+/// following operands are the metadata's values. If no metadata with @p Name is
+/// found, return nullptr.
+MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
+
+/// Return whether an MDNode might represent an access group.
+///
+/// Access group metadata nodes have to be distinct and empty. Being
+/// always-empty ensures that it never needs to be changed (which -- because
+/// MDNodes are designed immutable -- would require creating a new MDNode). Note
+/// that this is not a sufficient condition: not every distinct and empty NDNode
+/// is representing an access group.
+bool isValidAsAccessGroup(MDNode *AccGroup);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
index 941389858868..2b807919fedf 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -392,7 +392,10 @@ void LoopBase<BlockT, LoopT>::verifyLoopNest(
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth,
bool Verbose) const {
- OS.indent(Depth * 2) << "Loop at depth " << getLoopDepth() << " containing: ";
+ OS.indent(Depth * 2);
+ if (static_cast<const LoopT *>(this)->isAnnotatedParallel())
+ OS << "Parallel ";
+ OS << "Loop at depth " << getLoopDepth() << " containing: ";
BlockT *H = getHeader();
for (unsigned i = 0; i < getBlocks().size(); ++i) {
@@ -640,8 +643,8 @@ void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
- llvm::sort(BB1.begin(), BB1.end());
- llvm::sort(BB2.begin(), BB2.end());
+ llvm::sort(BB1);
+ llvm::sort(BB2);
return BB1 == BB2;
}
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 1c40cffc7f67..958d4fe4b832 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -37,7 +37,6 @@
namespace llvm {
class AssumptionCache;
-class CallSite;
class DominatorTree;
class Function;
class Instruction;
@@ -304,7 +303,7 @@ private:
/// The maximum size of the dereferences of the pointer.
///
/// May be UnknownSize if the sizes are unknown.
- LocationSize Size = MemoryLocation::UnknownSize;
+ LocationSize Size = LocationSize::unknown();
/// The AA tags associated with dereferences of the pointer.
///
/// The members may be null if there are no tags or conflicting tags.
@@ -398,7 +397,7 @@ public:
/// invalidated on the next non-local query or when an instruction is
/// removed. Clients must copy this data if they want it around longer than
/// that.
- const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
+ const NonLocalDepInfo &getNonLocalCallDependency(CallBase *QueryCall);
/// Perform a full dependency query for an access to the QueryInst's
/// specified memory location, returning the set of instructions that either
@@ -482,9 +481,9 @@ public:
void releaseMemory();
private:
- MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
- BasicBlock::iterator ScanIt,
- BasicBlock *BB);
+ MemDepResult getCallDependencyFrom(CallBase *Call, bool isReadOnlyCall,
+ BasicBlock::iterator ScanIt,
+ BasicBlock *BB);
bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
const PHITransAddr &Pointer,
const MemoryLocation &Loc, bool isLoad,
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryLocation.h b/contrib/llvm/include/llvm/Analysis/MemoryLocation.h
index 6b680000312c..fca18c1b5999 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -16,9 +16,9 @@
#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
#define LLVM_ANALYSIS_MEMORYLOCATION_H
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
namespace llvm {
@@ -34,8 +34,134 @@ class AnyMemIntrinsic;
class TargetLibraryInfo;
// Represents the size of a MemoryLocation. Logically, it's an
-// Optional<uint64_t>, with a special UnknownSize value from `MemoryLocation`.
-using LocationSize = uint64_t;
+// Optional<uint63_t> that also carries a bit to represent whether the integer
+// it contains, N, is 'precise'. Precise, in this context, means that we know
+// that the area of storage referenced by the given MemoryLocation must be
+// precisely N bytes. An imprecise value is formed as the union of two or more
+// precise values, and can conservatively represent all of the values unioned
+// into it. Importantly, imprecise values are an *upper-bound* on the size of a
+// MemoryLocation.
+//
+// Concretely, a precise MemoryLocation is (%p, 4) in
+// store i32 0, i32* %p
+//
+// Since we know that %p must be at least 4 bytes large at this point.
+// Otherwise, we have UB. An example of an imprecise MemoryLocation is (%p, 4)
+// at the memcpy in
+//
+// %n = select i1 %foo, i64 1, i64 4
+// call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %baz, i64 %n, i32 1,
+// i1 false)
+//
+// ...Since we'll copy *up to* 4 bytes into %p, but we can't guarantee that
+// we'll ever actually do so.
+//
+// If asked to represent a pathologically large value, this will degrade to
+// None.
+class LocationSize {
+ enum : uint64_t {
+ Unknown = ~uint64_t(0),
+ ImpreciseBit = uint64_t(1) << 63,
+ MapEmpty = Unknown - 1,
+ MapTombstone = Unknown - 2,
+
+ // The maximum value we can represent without falling back to 'unknown'.
+ MaxValue = (MapTombstone - 1) & ~ImpreciseBit,
+ };
+
+ uint64_t Value;
+
+ // Hack to support implicit construction. This should disappear when the
+ // public LocationSize ctor goes away.
+ enum DirectConstruction { Direct };
+
+ constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {}
+
+ static_assert(Unknown & ImpreciseBit, "Unknown is imprecise by definition.");
+public:
+ // FIXME: Migrate all users to construct via either `precise` or `upperBound`,
+ // to make it more obvious at the callsite the kind of size that they're
+ // providing.
+ //
+ // Since the overwhelming majority of users of this provide precise values,
+ // this assumes the provided value is precise.
+ constexpr LocationSize(uint64_t Raw)
+ : Value(Raw > MaxValue ? Unknown : Raw) {}
+
+ static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
+
+ static LocationSize upperBound(uint64_t Value) {
+ // You can't go lower than 0, so give a precise result.
+ if (LLVM_UNLIKELY(Value == 0))
+ return precise(0);
+ if (LLVM_UNLIKELY(Value > MaxValue))
+ return unknown();
+ return LocationSize(Value | ImpreciseBit, Direct);
+ }
+
+ constexpr static LocationSize unknown() {
+ return LocationSize(Unknown, Direct);
+ }
+
+ // Sentinel values, generally used for maps.
+ constexpr static LocationSize mapTombstone() {
+ return LocationSize(MapTombstone, Direct);
+ }
+ constexpr static LocationSize mapEmpty() {
+ return LocationSize(MapEmpty, Direct);
+ }
+
+ // Returns a LocationSize that can correctly represent either `*this` or
+ // `Other`.
+ LocationSize unionWith(LocationSize Other) const {
+ if (Other == *this)
+ return *this;
+
+ if (!hasValue() || !Other.hasValue())
+ return unknown();
+
+ return upperBound(std::max(getValue(), Other.getValue()));
+ }
+
+ bool hasValue() const { return Value != Unknown; }
+ uint64_t getValue() const {
+ assert(hasValue() && "Getting value from an unknown LocationSize!");
+ return Value & ~ImpreciseBit;
+ }
+
+ // Returns whether or not this value is precise. Note that if a value is
+ // precise, it's guaranteed to not be `unknown()`.
+ bool isPrecise() const {
+ return (Value & ImpreciseBit) == 0;
+ }
+
+ // Convenience method to check if this LocationSize's value is 0.
+ bool isZero() const { return hasValue() && getValue() == 0; }
+
+ bool operator==(const LocationSize &Other) const {
+ return Value == Other.Value;
+ }
+
+ bool operator!=(const LocationSize &Other) const {
+ return !(*this == Other);
+ }
+
+ // Ordering operators are not provided, since it's unclear if there's only one
+ // reasonable way to compare:
+ // - values that don't exist against values that do, and
+ // - precise values to imprecise values
+
+ void print(raw_ostream &OS) const;
+
+ // Returns an opaque value that represents this LocationSize. Cannot be
+ // reliably converted back into a LocationSize.
+ uint64_t toRaw() const { return Value; }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, LocationSize Size) {
+ Size.print(OS);
+ return OS;
+}
/// Representation for a specific memory location.
///
@@ -108,11 +234,15 @@ public:
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
/// Return a location representing a particular argument of a call.
- static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
- const TargetLibraryInfo &TLI);
+ static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
+ const TargetLibraryInfo *TLI);
+ static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
+ const TargetLibraryInfo &TLI) {
+ return getForArgument(Call, ArgIdx, &TLI);
+ }
explicit MemoryLocation(const Value *Ptr = nullptr,
- LocationSize Size = UnknownSize,
+ LocationSize Size = LocationSize::unknown(),
const AAMDNodes &AATags = AAMDNodes())
: Ptr(Ptr), Size(Size), AATags(AATags) {}
@@ -139,13 +269,30 @@ public:
}
};
-// Specialize DenseMapInfo for MemoryLocation.
+// Specialize DenseMapInfo.
+template <> struct DenseMapInfo<LocationSize> {
+ static inline LocationSize getEmptyKey() {
+ return LocationSize::mapEmpty();
+ }
+ static inline LocationSize getTombstoneKey() {
+ return LocationSize::mapTombstone();
+ }
+ static unsigned getHashValue(const LocationSize &Val) {
+ return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw());
+ }
+ static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) {
+ return LHS == RHS;
+ }
+};
+
template <> struct DenseMapInfo<MemoryLocation> {
static inline MemoryLocation getEmptyKey() {
- return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0);
+ return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(),
+ DenseMapInfo<LocationSize>::getEmptyKey());
}
static inline MemoryLocation getTombstoneKey() {
- return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0);
+ return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(),
+ DenseMapInfo<LocationSize>::getTombstoneKey());
}
static unsigned getHashValue(const MemoryLocation &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
diff --git a/contrib/llvm/include/llvm/Analysis/MemorySSA.h b/contrib/llvm/include/llvm/Analysis/MemorySSA.h
index d445e4430e5c..17e2d0c73977 100644
--- a/contrib/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/contrib/llvm/include/llvm/Analysis/MemorySSA.h
@@ -280,9 +280,10 @@ protected:
friend class MemorySSAUpdater;
MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
- DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
- : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInstruction(MI),
- OptimizedAccessAlias(MayAlias) {
+ DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
+ unsigned NumOperands)
+ : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
+ MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
setDefiningAccess(DMA);
}
@@ -308,11 +309,6 @@ private:
Optional<AliasResult> OptimizedAccessAlias;
};
-template <>
-struct OperandTraits<MemoryUseOrDef>
- : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
-
/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
@@ -323,7 +319,8 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
- : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}
+ : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
+ /*NumOperands=*/1) {}
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
@@ -381,31 +378,33 @@ public:
MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
unsigned Ver)
- : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}
+ : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
+ /*NumOperands=*/2),
+ ID(Ver) {}
- // allocate space for exactly one operand
- void *operator new(size_t s) { return User::operator new(s, 1); }
+ // allocate space for exactly two operands
+ void *operator new(size_t s) { return User::operator new(s, 2); }
static bool classof(const Value *MA) {
return MA->getValueID() == MemoryDefVal;
}
void setOptimized(MemoryAccess *MA) {
- Optimized = MA;
- OptimizedID = getDefiningAccess()->getID();
+ setOperand(1, MA);
+ OptimizedID = MA->getID();
}
MemoryAccess *getOptimized() const {
- return cast_or_null<MemoryAccess>(Optimized);
+ return cast_or_null<MemoryAccess>(getOperand(1));
}
bool isOptimized() const {
- return getOptimized() && getDefiningAccess() &&
- OptimizedID == getDefiningAccess()->getID();
+ return getOptimized() && OptimizedID == getOptimized()->getID();
}
void resetOptimized() {
OptimizedID = INVALID_MEMORYACCESS_ID;
+ setOperand(1, nullptr);
}
void print(raw_ostream &OS) const;
@@ -417,13 +416,34 @@ private:
const unsigned ID;
unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
- WeakVH Optimized;
};
template <>
-struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
+struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
+template <>
+struct OperandTraits<MemoryUseOrDef> {
+ static Use *op_begin(MemoryUseOrDef *MUD) {
+ if (auto *MU = dyn_cast<MemoryUse>(MUD))
+ return OperandTraits<MemoryUse>::op_begin(MU);
+ return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
+ }
+
+ static Use *op_end(MemoryUseOrDef *MUD) {
+ if (auto *MU = dyn_cast<MemoryUse>(MUD))
+ return OperandTraits<MemoryUse>::op_end(MU);
+ return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
+ }
+
+ static unsigned operands(const MemoryUseOrDef *MUD) {
+ if (const auto *MU = dyn_cast<MemoryUse>(MUD))
+ return OperandTraits<MemoryUse>::operands(MU);
+ return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
+ }
+};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
+
/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
@@ -684,13 +704,19 @@ public:
~MemorySSA();
MemorySSAWalker *getWalker();
+ MemorySSAWalker *getSkipSelfWalker();
/// Given a memory Mod/Ref'ing instruction, get the MemorySSA
/// access associated with it. If passed a basic block gets the memory phi
/// node that exists for that block, if there is one. Otherwise, this will get
/// a MemoryUseOrDef.
- MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
- MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
+ MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
+ return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
+ }
+
+ MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
+ return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
+ }
void dump() const;
void print(raw_ostream &) const;
@@ -750,6 +776,9 @@ public:
/// all uses, uses appear in the right places). This is used by unit tests.
void verifyMemorySSA() const;
+ /// Check clobber sanity for an access.
+ void checkClobberSanityAccess(const MemoryAccess *MA) const;
+
/// Used in various insertion functions to specify whether we are talking
/// about the beginning or end of a block.
enum InsertionPlace { Beginning, End };
@@ -764,6 +793,7 @@ protected:
void verifyDomination(Function &F) const;
void verifyOrdering(Function &F) const;
void verifyDominationNumbers(const Function &F) const;
+ void verifyClobberSanity(const Function &F) const;
// This is used by the use optimizer and updater.
AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
@@ -796,16 +826,20 @@ protected:
InsertionPlace);
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
AccessList::iterator);
- MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
+ MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
+ const MemoryUseOrDef *Template = nullptr);
private:
+ class ClobberWalkerBase;
class CachingWalker;
+ class SkipSelfWalker;
class OptimizeUses;
CachingWalker *getWalkerImpl();
void buildMemorySSA();
void optimizeUses();
+ void prepareForMoveTo(MemoryAccess *, BasicBlock *);
void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
@@ -816,7 +850,8 @@ private:
void markUnreachableAsLiveOnEntry(BasicBlock *BB);
bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
MemoryPhi *createMemoryPhi(BasicBlock *BB);
- MemoryUseOrDef *createNewAccess(Instruction *);
+ MemoryUseOrDef *createNewAccess(Instruction *,
+ const MemoryUseOrDef *Template = nullptr);
MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
@@ -851,7 +886,9 @@ private:
mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
// Memory SSA building info
+ std::unique_ptr<ClobberWalkerBase> WalkerBase;
std::unique_ptr<CachingWalker> Walker;
+ std::unique_ptr<SkipSelfWalker> SkipWalker;
unsigned NextID;
};
diff --git a/contrib/llvm/include/llvm/Analysis/MemorySSAUpdater.h b/contrib/llvm/include/llvm/Analysis/MemorySSAUpdater.h
index 38f08c1eebdc..169d5bd9fa8b 100644
--- a/contrib/llvm/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/contrib/llvm/include/llvm/Analysis/MemorySSAUpdater.h
@@ -35,8 +35,11 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFGDiff.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
@@ -45,6 +48,7 @@
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -57,6 +61,12 @@ class MemoryAccess;
class LLVMContext;
class raw_ostream;
+using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
+using PhiToDefMap = SmallDenseMap<MemoryPhi *, MemoryAccess *>;
+using CFGUpdate = cfg::Update<BasicBlock *>;
+using GraphDiffInvBBPair =
+ std::pair<const GraphDiff<BasicBlock *> *, Inverse<BasicBlock *>>;
+
class MemorySSAUpdater {
private:
MemorySSA *MSSA;
@@ -70,6 +80,7 @@ private:
public:
MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
+
/// Insert a definition into the MemorySSA IR. RenameUses will rename any use
/// below the new def block (and any inserted phis). RenameUses should be set
/// to true if the definition may cause new aliases for loads below it. This
@@ -89,15 +100,48 @@ public:
/// Where a mayalias b, *does* require RenameUses be set to true.
void insertDef(MemoryDef *Def, bool RenameUses = false);
void insertUse(MemoryUse *Use);
+ /// Update the MemoryPhi in `To` following an edge deletion between `From` and
+ /// `To`. If `To` becomes unreachable, a call to removeBlocks should be made.
+ void removeEdge(BasicBlock *From, BasicBlock *To);
+ /// Update the MemoryPhi in `To` to have a single incoming edge from `From`,
+ /// following a CFG change that replaced multiple edges (switch) with a direct
+ /// branch.
+ void removeDuplicatePhiEdgesBetween(BasicBlock *From, BasicBlock *To);
+ /// Update MemorySSA after a loop was cloned, given the blocks in RPO order,
+ /// the exit blocks and a 1:1 mapping of all blocks and instructions
+ /// cloned. This involves duplicating all defs and uses in the cloned blocks
+ /// Updating phi nodes in exit block successors is done separately.
+ void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
+ ArrayRef<BasicBlock *> ExitBlocks,
+ const ValueToValueMapTy &VM,
+ bool IgnoreIncomingWithNoClones = false);
+ // Block BB was fully or partially cloned into its predecessor P1. Map
+ // contains the 1:1 mapping of instructions cloned and VM[BB]=P1.
+ void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1,
+ const ValueToValueMapTy &VM);
+ /// Update phi nodes in exit block successors following cloning. Exit blocks
+ /// that were not cloned don't have additional predecessors added.
+ void updateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
+ const ValueToValueMapTy &VMap,
+ DominatorTree &DT);
+ void updateExitBlocksForClonedLoop(
+ ArrayRef<BasicBlock *> ExitBlocks,
+ ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT);
+
+ /// Apply CFG updates, analogous with the DT edge updates.
+ void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
+ /// Apply CFG insert updates, analogous with the DT edge updates.
+ void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
+
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
MemorySSA::InsertionPlace Where);
- /// `From` block was spliced into `From` and `To`.
- /// Move all accesses from `From` to `To` starting at instruction `Start`.
- /// `To` is newly created BB, so empty of MemorySSA::MemoryAccesses.
- /// Edges are already updated, so successors of `To` with MPhi nodes need to
- /// update incoming block.
+ /// `From` block was spliced into `From` and `To`. There is a CFG edge from
+ /// `From` to `To`. Move all accesses from `From` to `To` starting at
+ /// instruction `Start`. `To` is newly created BB, so empty of
+ /// MemorySSA::MemoryAccesses. Edges are already updated, so successors of
+ /// `To` with MPhi nodes need to update incoming block.
/// |------| |------|
/// | From | | From |
/// | | |------|
@@ -108,12 +152,12 @@ public:
/// |------| |------|
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
Instruction *Start);
- /// `From` block was merged into `To`. All instructions were moved and
- /// `From` is an empty block with successor edges; `From` is about to be
- /// deleted. Move all accesses from `From` to `To` starting at instruction
- /// `Start`. `To` may have multiple successors, `From` has a single
- /// predecessor. `From` may have successors with MPhi nodes, replace their
- /// incoming block with `To`.
+ /// `From` block was merged into `To`. There is a CFG edge from `To` to
+ /// `From`.`To` still branches to `From`, but all instructions were moved and
+ /// `From` is now an empty block; `From` is about to be deleted. Move all
+ /// accesses from `From` to `To` starting at instruction `Start`. `To` may
+ /// have multiple successors, `From` has a single predecessor. `From` may have
+ /// successors with MPhi nodes, replace their incoming block with `To`.
/// |------| |------|
/// | To | | To |
/// |------| | |
@@ -124,15 +168,14 @@ public:
/// |------| |------|
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
Instruction *Start);
- /// BasicBlock Old had New, an empty BasicBlock, added directly before it,
- /// and the predecessors in Preds that used to point to Old, now point to
- /// New. If New is the only predecessor, move Old's Phi, if present, to New.
+ /// A new empty BasicBlock (New) now branches directly to Old. Some of
+ /// Old's predecessors (Preds) are now branching to New instead of Old.
+ /// If New is the only predecessor, move Old's Phi, if present, to New.
/// Otherwise, add a new Phi in New with appropriate incoming values, and
/// update the incoming values in Old's Phi node too, if present.
- void
- wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New,
- ArrayRef<BasicBlock *> Preds);
-
+ void wireOldPredecessorsToNewImmediatePredecessor(
+ BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
+ bool IdenticalEdgesWereMerged = true);
// The below are utility functions. Other than creation of accesses to pass
// to insertDef, and removeAccess to remove accesses, you should generally
// not attempt to update memoryssa yourself. It is very non-trivial to get
@@ -220,6 +263,23 @@ private:
template <class RangeType>
MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
void fixupDefs(const SmallVectorImpl<WeakVH> &);
+ // Clone all uses and defs from BB to NewBB given a 1:1 map of all
+ // instructions and blocks cloned, and a map of MemoryPhi : Definition
+ // (MemoryAccess Phi or Def). VMap maps old instructions to cloned
+ // instructions and old blocks to cloned blocks. MPhiMap, is created in the
+ // caller of this private method, and maps existing MemoryPhis to new
+ // definitions that new MemoryAccesses must point to. These definitions may
+ // not necessarily be MemoryPhis themselves, they may be MemoryDefs. As such,
+ // the map is between MemoryPhis and MemoryAccesses, where the MemoryAccesses
+ // may be MemoryPhis or MemoryDefs and not MemoryUses.
+ void cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
+ const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap);
+ template <typename Iter>
+ void privateUpdateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
+ Iter ValuesBegin, Iter ValuesEnd,
+ DominatorTree &DT);
+ void applyInsertUpdates(ArrayRef<CFGUpdate>, DominatorTree &DT,
+ const GraphDiff<BasicBlock *> *GD);
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Analysis/MustExecute.h b/contrib/llvm/include/llvm/Analysis/MustExecute.h
index 97ad76d451ca..ad3222c17e62 100644
--- a/contrib/llvm/include/llvm/Analysis/MustExecute.h
+++ b/contrib/llvm/include/llvm/Analysis/MustExecute.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
@@ -31,33 +32,138 @@ class DominatorTree;
class Loop;
/// Captures loop safety information.
-/// It keep information for loop & its header may throw exception or otherwise
+/// It keep information for loop blocks may throw exception or otherwise
/// exit abnormaly on any iteration of the loop which might actually execute
/// at runtime. The primary way to consume this infromation is via
/// isGuaranteedToExecute below, but some callers bailout or fallback to
/// alternate reasoning if a loop contains any implicit control flow.
-struct LoopSafetyInfo {
- bool MayThrow = false; // The current loop contains an instruction which
- // may throw.
- bool HeaderMayThrow = false; // Same as previous, but specific to loop header
+/// NOTE: LoopSafetyInfo contains cached information regarding loops and their
+/// particular blocks. This information is only dropped on invocation of
+/// computeLoopSafetyInfo. If the loop or any of its block is deleted, or if
+/// any thrower instructions have been added or removed from them, or if the
+/// control flow has changed, or in case of other meaningful modifications, the
+/// LoopSafetyInfo needs to be recomputed. If a meaningful modifications to the
+/// loop were made and the info wasn't recomputed properly, the behavior of all
+/// methods except for computeLoopSafetyInfo is undefined.
+class LoopSafetyInfo {
// Used to update funclet bundle operands.
DenseMap<BasicBlock *, ColorVector> BlockColors;
+protected:
+ /// Computes block colors.
+ void computeBlockColors(const Loop *CurLoop);
+
+public:
+ /// Returns block colors map that is used to update funclet operand bundles.
+ const DenseMap<BasicBlock *, ColorVector> &getBlockColors() const;
+
+ /// Copy colors of block \p Old into the block \p New.
+ void copyColors(BasicBlock *New, BasicBlock *Old);
+
+ /// Returns true iff the block \p BB potentially may throw exception. It can
+ /// be false-positive in cases when we want to avoid complex analysis.
+ virtual bool blockMayThrow(const BasicBlock *BB) const = 0;
+
+ /// Returns true iff any block of the loop for which this info is contains an
+ /// instruction that may throw or otherwise exit abnormally.
+ virtual bool anyBlockMayThrow() const = 0;
+
+ /// Return true if we must reach the block \p BB under assumption that the
+ /// loop \p CurLoop is entered.
+ bool allLoopPathsLeadToBlock(const Loop *CurLoop, const BasicBlock *BB,
+ const DominatorTree *DT) const;
+
+ /// Computes safety information for a loop checks loop body & header for
+ /// the possibility of may throw exception, it takes LoopSafetyInfo and loop
+ /// as argument. Updates safety information in LoopSafetyInfo argument.
+ /// Note: This is defined to clear and reinitialize an already initialized
+ /// LoopSafetyInfo. Some callers rely on this fact.
+ virtual void computeLoopSafetyInfo(const Loop *CurLoop) = 0;
+
+ /// Returns true if the instruction in a loop is guaranteed to execute at
+ /// least once (under the assumption that the loop is entered).
+ virtual bool isGuaranteedToExecute(const Instruction &Inst,
+ const DominatorTree *DT,
+ const Loop *CurLoop) const = 0;
+
LoopSafetyInfo() = default;
+
+ virtual ~LoopSafetyInfo() = default;
};
-/// Computes safety information for a loop checks loop body & header for
-/// the possibility of may throw exception, it takes LoopSafetyInfo and loop as
-/// argument. Updates safety information in LoopSafetyInfo argument.
-/// Note: This is defined to clear and reinitialize an already initialized
-/// LoopSafetyInfo. Some callers rely on this fact.
-void computeLoopSafetyInfo(LoopSafetyInfo *, Loop *);
-
-/// Returns true if the instruction in a loop is guaranteed to execute at least
-/// once (under the assumption that the loop is entered).
-bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT,
- const Loop *CurLoop,
- const LoopSafetyInfo *SafetyInfo);
+
+/// Simple and conservative implementation of LoopSafetyInfo that can give
+/// false-positive answers to its queries in order to avoid complicated
+/// analysis.
+class SimpleLoopSafetyInfo: public LoopSafetyInfo {
+ bool MayThrow = false; // The current loop contains an instruction which
+ // may throw.
+ bool HeaderMayThrow = false; // Same as previous, but specific to loop header
+
+public:
+ virtual bool blockMayThrow(const BasicBlock *BB) const;
+
+ virtual bool anyBlockMayThrow() const;
+
+ virtual void computeLoopSafetyInfo(const Loop *CurLoop);
+
+ virtual bool isGuaranteedToExecute(const Instruction &Inst,
+ const DominatorTree *DT,
+ const Loop *CurLoop) const;
+
+ SimpleLoopSafetyInfo() : LoopSafetyInfo() {};
+
+ virtual ~SimpleLoopSafetyInfo() {};
+};
+
+/// This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to
+/// give precise answers on "may throw" queries. This implementation uses cache
+/// that should be invalidated by calling the methods insertInstructionTo and
+/// removeInstruction whenever we modify a basic block's contents by adding or
+/// removing instructions.
+class ICFLoopSafetyInfo: public LoopSafetyInfo {
+ bool MayThrow = false; // The current loop contains an instruction which
+ // may throw.
+ // Contains information about implicit control flow in this loop's blocks.
+ mutable ImplicitControlFlowTracking ICF;
+ // Contains information about instruction that may possibly write memory.
+ mutable MemoryWriteTracking MW;
+
+public:
+ virtual bool blockMayThrow(const BasicBlock *BB) const;
+
+ virtual bool anyBlockMayThrow() const;
+
+ virtual void computeLoopSafetyInfo(const Loop *CurLoop);
+
+ virtual bool isGuaranteedToExecute(const Instruction &Inst,
+ const DominatorTree *DT,
+ const Loop *CurLoop) const;
+
+ /// Returns true if we could not execute a memory-modifying instruction before
+ /// we enter \p BB under assumption that \p CurLoop is entered.
+ bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop)
+ const;
+
+ /// Returns true if we could not execute a memory-modifying instruction before
+ /// we execute \p I under assumption that \p CurLoop is entered.
+ bool doesNotWriteMemoryBefore(const Instruction &I, const Loop *CurLoop)
+ const;
+
+ /// Inform the safety info that we are planning to insert a new instruction
+ /// \p Inst into the basic block \p BB. It will make all cache updates to keep
+ /// it correct after this insertion.
+ void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
+
+ /// Inform safety info that we are planning to remove the instruction \p Inst
+ /// from its block. It will make all cache updates to keep it correct after
+ /// this removal.
+ void removeInstruction(const Instruction *Inst);
+
+ ICFLoopSafetyInfo(DominatorTree *DT) : LoopSafetyInfo(), ICF(DT), MW(DT) {};
+
+ virtual ~ICFLoopSafetyInfo() {};
+};
}
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
index 559c77c30811..58a67042ea2d 100644
--- a/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -60,7 +60,7 @@ public:
FunctionModRefBehavior getModRefBehavior(const Function *F);
using AAResultBase::getModRefInfo;
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
};
/// Analysis pass providing a never-invalidated alias analysis result.
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 07beb0bb60a3..1f497fab35da 100644
--- a/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -51,25 +51,25 @@ extern bool EnableARCOpts;
/// on.
inline bool ModuleHasARC(const Module &M) {
return
- M.getNamedValue("objc_retain") ||
- M.getNamedValue("objc_release") ||
- M.getNamedValue("objc_autorelease") ||
- M.getNamedValue("objc_retainAutoreleasedReturnValue") ||
- M.getNamedValue("objc_unsafeClaimAutoreleasedReturnValue") ||
- M.getNamedValue("objc_retainBlock") ||
- M.getNamedValue("objc_autoreleaseReturnValue") ||
- M.getNamedValue("objc_autoreleasePoolPush") ||
- M.getNamedValue("objc_loadWeakRetained") ||
- M.getNamedValue("objc_loadWeak") ||
- M.getNamedValue("objc_destroyWeak") ||
- M.getNamedValue("objc_storeWeak") ||
- M.getNamedValue("objc_initWeak") ||
- M.getNamedValue("objc_moveWeak") ||
- M.getNamedValue("objc_copyWeak") ||
- M.getNamedValue("objc_retainedObject") ||
- M.getNamedValue("objc_unretainedObject") ||
- M.getNamedValue("objc_unretainedPointer") ||
- M.getNamedValue("clang.arc.use");
+ M.getNamedValue("llvm.objc.retain") ||
+ M.getNamedValue("llvm.objc.release") ||
+ M.getNamedValue("llvm.objc.autorelease") ||
+ M.getNamedValue("llvm.objc.retainAutoreleasedReturnValue") ||
+ M.getNamedValue("llvm.objc.unsafeClaimAutoreleasedReturnValue") ||
+ M.getNamedValue("llvm.objc.retainBlock") ||
+ M.getNamedValue("llvm.objc.autoreleaseReturnValue") ||
+ M.getNamedValue("llvm.objc.autoreleasePoolPush") ||
+ M.getNamedValue("llvm.objc.loadWeakRetained") ||
+ M.getNamedValue("llvm.objc.loadWeak") ||
+ M.getNamedValue("llvm.objc.destroyWeak") ||
+ M.getNamedValue("llvm.objc.storeWeak") ||
+ M.getNamedValue("llvm.objc.initWeak") ||
+ M.getNamedValue("llvm.objc.moveWeak") ||
+ M.getNamedValue("llvm.objc.copyWeak") ||
+ M.getNamedValue("llvm.objc.retainedObject") ||
+ M.getNamedValue("llvm.objc.unretainedObject") ||
+ M.getNamedValue("llvm.objc.unretainedPointer") ||
+ M.getNamedValue("llvm.objc.clang.arc.use");
}
/// This is a wrapper around getUnderlyingObject which also knows how to
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h b/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h
index 0b92d8b48356..018ea1f851be 100644
--- a/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h
@@ -11,6 +11,7 @@
#define LLVM_ANALYSIS_OBJCARCINSTKIND_H
#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Instructions.h"
namespace llvm {
@@ -48,7 +49,7 @@ enum class ARCInstKind {
CopyWeak, ///< objc_copyWeak (derived)
DestroyWeak, ///< objc_destroyWeak (derived)
StoreStrong, ///< objc_storeStrong (derived)
- IntrinsicUser, ///< clang.arc.use
+ IntrinsicUser, ///< llvm.objc.clang.arc.use
CallOrUser, ///< could call objc_release and/or "use" pointers
Call, ///< could call objc_release
User, ///< could "use" a pointer
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/OrderedInstructions.h b/contrib/llvm/include/llvm/Analysis/OrderedInstructions.h
index 7f57fde638b8..7e3850b87c57 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/OrderedInstructions.h
+++ b/contrib/llvm/include/llvm/Analysis/OrderedInstructions.h
@@ -17,8 +17,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
-#define LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+#ifndef LLVM_ANALYSIS_ORDEREDINSTRUCTIONS_H
+#define LLVM_ANALYSIS_ORDEREDINSTRUCTIONS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
@@ -62,4 +62,4 @@ public:
} // end namespace llvm
-#endif // LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+#endif // LLVM_ANALYSIS_ORDEREDINSTRUCTIONS_H
diff --git a/contrib/llvm/include/llvm/Analysis/Passes.h b/contrib/llvm/include/llvm/Analysis/Passes.h
index 09b28a0b0884..081dd5000835 100644
--- a/contrib/llvm/include/llvm/Analysis/Passes.h
+++ b/contrib/llvm/include/llvm/Analysis/Passes.h
@@ -61,10 +61,10 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
- // createDivergenceAnalysisPass - This pass determines which branches in a GPU
+ // createLegacyDivergenceAnalysisPass - This pass determines which branches in a GPU
// program are divergent.
//
- FunctionPass *createDivergenceAnalysisPass();
+ FunctionPass *createLegacyDivergenceAnalysisPass();
//===--------------------------------------------------------------------===//
//
diff --git a/contrib/llvm/include/llvm/Analysis/PhiValues.h b/contrib/llvm/include/llvm/Analysis/PhiValues.h
index 6607b329c04f..76204ac1bc6c 100644
--- a/contrib/llvm/include/llvm/Analysis/PhiValues.h
+++ b/contrib/llvm/include/llvm/Analysis/PhiValues.h
@@ -88,6 +88,22 @@ private:
/// All values reachable from each component.
DenseMap<unsigned int, ConstValueSet> ReachableMap;
+ /// A CallbackVH to notify PhiValues when a value is deleted or replaced, so
+ /// that the cached information for that value can be cleared to avoid
+ /// dangling pointers to invalid values.
+ class PhiValuesCallbackVH final : public CallbackVH {
+ PhiValues *PV;
+ void deleted() override;
+ void allUsesReplacedWith(Value *New) override;
+
+ public:
+ PhiValuesCallbackVH(Value *V, PhiValues *PV = nullptr)
+ : CallbackVH(V), PV(PV) {}
+ };
+
+ /// A set of callbacks to the values that processPhi has seen.
+ DenseSet<PhiValuesCallbackVH, DenseMapInfo<Value *>> TrackedValues;
+
/// The function that the PhiValues is for.
const Function &F;
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
index 58b67e74ba51..3aef4be72d71 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -98,14 +98,14 @@ public:
bool isFunctionEntryCold(const Function *F);
/// Returns true if \p F contains only cold code.
bool isFunctionColdInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
- /// Returns true if \p F is a hot function.
+ /// Returns true if count \p C is considered hot.
bool isHotCount(uint64_t C);
/// Returns true if count \p C is considered cold.
bool isColdCount(uint64_t C);
- /// Returns true if BasicBlock \p B is considered hot.
- bool isHotBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
- /// Returns true if BasicBlock \p B is considered cold.
- bool isColdBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
+ /// Returns true if BasicBlock \p BB is considered hot.
+ bool isHotBlock(const BasicBlock *BB, BlockFrequencyInfo *BFI);
+ /// Returns true if BasicBlock \p BB is considered cold.
+ bool isColdBlock(const BasicBlock *BB, BlockFrequencyInfo *BFI);
/// Returns true if CallSite \p CS is considered hot.
bool isHotCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
/// Returns true if Callsite \p CS is considered cold.
@@ -134,9 +134,8 @@ public:
static char ID;
ProfileSummaryInfoWrapperPass();
- ProfileSummaryInfo *getPSI() {
- return &*PSI;
- }
+ ProfileSummaryInfo &getPSI() { return *PSI; }
+ const ProfileSummaryInfo &getPSI() const { return *PSI; }
bool doInitialization(Module &M) override;
bool doFinalization(Module &M) override;
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 89918e3c205b..8f4200b07e5c 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1833,6 +1833,10 @@ private:
const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags);
+ // Get addrec expr already created or create a new one.
+ const SCEV *getOrCreateAddRecExpr(SmallVectorImpl<const SCEV *> &Ops,
+ const Loop *L, SCEV::NoWrapFlags Flags);
+
/// Return x if \p Val is f(x) where f is a 1-1 function.
const SCEV *stripInjectiveFunctions(const SCEV *Val) const;
diff --git a/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h b/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
index 508968e16e5d..1356c6e9198a 100644
--- a/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
+++ b/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -16,7 +16,7 @@
#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
@@ -41,8 +41,8 @@ public:
}
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
private:
bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
diff --git a/contrib/llvm/include/llvm/Analysis/SparsePropagation.h b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
index defcf96afb25..02a2e64268b7 100644
--- a/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
+++ b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
@@ -189,12 +189,12 @@ private:
/// getFeasibleSuccessors - Return a vector of booleans to indicate which
/// successors are reachable from a given terminator instruction.
- void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs,
+ void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs,
bool AggressiveUndef);
void visitInst(Instruction &I);
void visitPHINode(PHINode &I);
- void visitTerminatorInst(TerminatorInst &TI);
+ void visitTerminator(Instruction &TI);
};
//===----------------------------------------------------------------------===//
@@ -286,7 +286,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::markEdgeExecutable(
template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getFeasibleSuccessors(
- TerminatorInst &TI, SmallVectorImpl<bool> &Succs, bool AggressiveUndef) {
+ Instruction &TI, SmallVectorImpl<bool> &Succs, bool AggressiveUndef) {
Succs.resize(TI.getNumSuccessors());
if (TI.getNumSuccessors() == 0)
return;
@@ -330,7 +330,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getFeasibleSuccessors(
return;
}
- if (TI.isExceptional()) {
+ if (TI.isExceptionalTerminator()) {
Succs.assign(Succs.size(), true);
return;
}
@@ -374,7 +374,7 @@ template <class LatticeKey, class LatticeVal, class KeyInfo>
bool SparseSolver<LatticeKey, LatticeVal, KeyInfo>::isEdgeFeasible(
BasicBlock *From, BasicBlock *To, bool AggressiveUndef) {
SmallVector<bool, 16> SuccFeasible;
- TerminatorInst *TI = From->getTerminator();
+ Instruction *TI = From->getTerminator();
getFeasibleSuccessors(*TI, SuccFeasible, AggressiveUndef);
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
@@ -385,8 +385,8 @@ bool SparseSolver<LatticeKey, LatticeVal, KeyInfo>::isEdgeFeasible(
}
template <class LatticeKey, class LatticeVal, class KeyInfo>
-void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitTerminatorInst(
- TerminatorInst &TI) {
+void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitTerminator(
+ Instruction &TI) {
SmallVector<bool, 16> SuccFeasible;
getFeasibleSuccessors(TI, SuccFeasible, true);
@@ -465,8 +465,8 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitInst(Instruction &I) {
if (ChangedValue.second != LatticeFunc->getUntrackedVal())
UpdateState(ChangedValue.first, ChangedValue.second);
- if (TerminatorInst *TI = dyn_cast<TerminatorInst>(&I))
- visitTerminatorInst(*TI);
+ if (I.isTerminator())
+ visitTerminator(I);
}
template <class LatticeKey, class LatticeVal, class KeyInfo>
diff --git a/contrib/llvm/include/llvm/Analysis/StackSafetyAnalysis.h b/contrib/llvm/include/llvm/Analysis/StackSafetyAnalysis.h
new file mode 100644
index 000000000000..8a151650a34c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/StackSafetyAnalysis.h
@@ -0,0 +1,120 @@
+//===- StackSafetyAnalysis.h - Stack memory safety analysis -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Stack Safety Analysis detects allocas and arguments with safe access.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_STACKSAFETYANALYSIS_H
+#define LLVM_ANALYSIS_STACKSAFETYANALYSIS_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// Interface to access stack safety analysis results for single function.
+class StackSafetyInfo {
+public:
+ struct FunctionInfo;
+
+private:
+ std::unique_ptr<FunctionInfo> Info;
+
+public:
+ StackSafetyInfo();
+ StackSafetyInfo(FunctionInfo &&Info);
+ StackSafetyInfo(StackSafetyInfo &&);
+ StackSafetyInfo &operator=(StackSafetyInfo &&);
+ ~StackSafetyInfo();
+
+ // TODO: Add useful for client methods.
+ void print(raw_ostream &O) const;
+};
+
+/// StackSafetyInfo wrapper for the new pass manager.
+class StackSafetyAnalysis : public AnalysisInfoMixin<StackSafetyAnalysis> {
+ friend AnalysisInfoMixin<StackSafetyAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ using Result = StackSafetyInfo;
+ StackSafetyInfo run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Printer pass for the \c StackSafetyAnalysis results.
+class StackSafetyPrinterPass : public PassInfoMixin<StackSafetyPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit StackSafetyPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// StackSafetyInfo wrapper for the legacy pass manager
+class StackSafetyInfoWrapperPass : public FunctionPass {
+ StackSafetyInfo SSI;
+
+public:
+ static char ID;
+ StackSafetyInfoWrapperPass();
+
+ const StackSafetyInfo &getResult() const { return SSI; }
+
+ void print(raw_ostream &O, const Module *M) const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+};
+
+using StackSafetyGlobalInfo = std::map<const GlobalValue *, StackSafetyInfo>;
+
+/// This pass performs the global (interprocedural) stack safety analysis (new
+/// pass manager).
+class StackSafetyGlobalAnalysis
+ : public AnalysisInfoMixin<StackSafetyGlobalAnalysis> {
+ friend AnalysisInfoMixin<StackSafetyGlobalAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ using Result = StackSafetyGlobalInfo;
+ Result run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Printer pass for the \c StackSafetyGlobalAnalysis results.
+class StackSafetyGlobalPrinterPass
+ : public PassInfoMixin<StackSafetyGlobalPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit StackSafetyGlobalPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// This pass performs the global (interprocedural) stack safety analysis
+/// (legacy pass manager).
+class StackSafetyGlobalInfoWrapperPass : public ModulePass {
+ StackSafetyGlobalInfo SSI;
+
+public:
+ static char ID;
+
+ StackSafetyGlobalInfoWrapperPass();
+
+ const StackSafetyGlobalInfo &getResult() const { return SSI; }
+
+ void print(raw_ostream &O, const Module *M) const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnModule(Module &M) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_STACKSAFETYANALYSIS_H
diff --git a/contrib/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h
new file mode 100644
index 000000000000..df693d9d8e8c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h
@@ -0,0 +1,86 @@
+//===- SyncDependenceAnalysis.h - Divergent Branch Dependence -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file defines the SyncDependenceAnalysis class, which computes for
+// every divergent branch the set of phi nodes that the branch will make
+// divergent.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SYNC_DEPENDENCE_ANALYSIS_H
+#define LLVM_ANALYSIS_SYNC_DEPENDENCE_ANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include <memory>
+
+namespace llvm {
+
+class BasicBlock;
+class DominatorTree;
+class Loop;
+class PostDominatorTree;
+
+using ConstBlockSet = SmallPtrSet<const BasicBlock *, 4>;
+
+/// \brief Relates points of divergent control to join points in
+/// reducible CFGs.
+///
+/// This analysis relates points of divergent control to points of converging
+/// divergent control. The analysis requires all loops to be reducible.
+class SyncDependenceAnalysis {
+ void visitSuccessor(const BasicBlock &succBlock, const Loop *termLoop,
+ const BasicBlock *defBlock);
+
+public:
+ bool inRegion(const BasicBlock &BB) const;
+
+ ~SyncDependenceAnalysis();
+ SyncDependenceAnalysis(const DominatorTree &DT, const PostDominatorTree &PDT,
+ const LoopInfo &LI);
+
+ /// \brief Computes divergent join points and loop exits caused by branch
+ /// divergence in \p Term.
+ ///
+ /// The set of blocks which are reachable by disjoint paths from \p Term.
+/// The set also contains loop exits if there are two disjoint paths:
+ /// one from \p Term to the loop exit and another from \p Term to the loop
+ /// header. Those exit blocks are added to the returned set.
+ /// If L is the parent loop of \p Term and an exit of L is in the returned
+ /// set then L is a divergent loop.
+ const ConstBlockSet &join_blocks(const Instruction &Term);
+
+ /// \brief Computes divergent join points and loop exits (in the surrounding
+/// loop) caused by the divergent loop exits of \p Loop.
+ ///
+ /// The set of blocks which are reachable by disjoint paths from the
+ /// loop exits of \p Loop.
+ /// This treats the loop as a single node in \p Loop's parent loop.
+ /// The returned set has the same properties as for join_blocks(TermInst&).
+ const ConstBlockSet &join_blocks(const Loop &Loop);
+
+private:
+ static ConstBlockSet EmptyBlockSet;
+
+ ReversePostOrderTraversal<const Function *> FuncRPOT;
+ const DominatorTree &DT;
+ const PostDominatorTree &PDT;
+ const LoopInfo &LI;
+
+ std::map<const Loop *, std::unique_ptr<ConstBlockSet>> CachedLoopExitJoins;
+ std::map<const Instruction *, std::unique_ptr<ConstBlockSet>>
+ CachedBranchJoins;
+};
+
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_SYNC_DEPENDENCE_ANALYSIS_H
diff --git a/contrib/llvm/include/llvm/Analysis/SyntheticCountsUtils.h b/contrib/llvm/include/llvm/Analysis/SyntheticCountsUtils.h
index 87f4a0100b38..db80bef001e2 100644
--- a/contrib/llvm/include/llvm/Analysis/SyntheticCountsUtils.h
+++ b/contrib/llvm/include/llvm/Analysis/SyntheticCountsUtils.h
@@ -36,16 +36,17 @@ public:
using EdgeRef = typename CGT::EdgeRef;
using SccTy = std::vector<NodeRef>;
- using GetRelBBFreqTy = function_ref<Optional<Scaled64>(EdgeRef)>;
- using GetCountTy = function_ref<uint64_t(NodeRef)>;
- using AddCountTy = function_ref<void(NodeRef, uint64_t)>;
+ // Not all EdgeRef have information about the source of the edge. Hence
+ // NodeRef corresponding to the source of the EdgeRef is explicitly passed.
+ using GetProfCountTy = function_ref<Optional<Scaled64>(NodeRef, EdgeRef)>;
+ using AddCountTy = function_ref<void(NodeRef, Scaled64)>;
- static void propagate(const CallGraphType &CG, GetRelBBFreqTy GetRelBBFreq,
- GetCountTy GetCount, AddCountTy AddCount);
+ static void propagate(const CallGraphType &CG, GetProfCountTy GetProfCount,
+ AddCountTy AddCount);
private:
- static void propagateFromSCC(const SccTy &SCC, GetRelBBFreqTy GetRelBBFreq,
- GetCountTy GetCount, AddCountTy AddCount);
+ static void propagateFromSCC(const SccTy &SCC, GetProfCountTy GetProfCount,
+ AddCountTy AddCount);
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
index f94debba9c52..518a85ee1a01 100644
--- a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -565,6 +565,30 @@ TLI_DEFINE_STRING_INTERNAL("cosl")
/// char *ctermid(char *s);
TLI_DEFINE_ENUM_INTERNAL(ctermid)
TLI_DEFINE_STRING_INTERNAL("ctermid")
+/// int execl(const char *path, const char *arg, ...);
+TLI_DEFINE_ENUM_INTERNAL(execl)
+TLI_DEFINE_STRING_INTERNAL("execl")
+/// int execle(const char *file, const char *arg, ..., char * const envp[]);
+TLI_DEFINE_ENUM_INTERNAL(execle)
+TLI_DEFINE_STRING_INTERNAL("execle")
+/// int execlp(const char *file, const char *arg, ...);
+TLI_DEFINE_ENUM_INTERNAL(execlp)
+TLI_DEFINE_STRING_INTERNAL("execlp")
+/// int execv(const char *path, char *const argv[]);
+TLI_DEFINE_ENUM_INTERNAL(execv)
+TLI_DEFINE_STRING_INTERNAL("execv")
+/// int execvP(const char *file, const char *search_path, char *const argv[]);
+TLI_DEFINE_ENUM_INTERNAL(execvP)
+TLI_DEFINE_STRING_INTERNAL("execvP")
+/// int execve(const char *filename, char *const argv[], char *const envp[]);
+TLI_DEFINE_ENUM_INTERNAL(execve)
+TLI_DEFINE_STRING_INTERNAL("execve")
+/// int execvp(const char *file, char *const argv[]);
+TLI_DEFINE_ENUM_INTERNAL(execvp)
+TLI_DEFINE_STRING_INTERNAL("execvp")
+/// int execvpe(const char *file, char *const argv[], char *const envp[]);
+TLI_DEFINE_ENUM_INTERNAL(execvpe)
+TLI_DEFINE_STRING_INTERNAL("execvpe")
/// double exp(double x);
TLI_DEFINE_ENUM_INTERNAL(exp)
TLI_DEFINE_STRING_INTERNAL("exp")
@@ -709,6 +733,9 @@ TLI_DEFINE_STRING_INTERNAL("fopen")
/// FILE *fopen64(const char *filename, const char *opentype)
TLI_DEFINE_ENUM_INTERNAL(fopen64)
TLI_DEFINE_STRING_INTERNAL("fopen64")
+/// int fork();
+TLI_DEFINE_ENUM_INTERNAL(fork)
+TLI_DEFINE_STRING_INTERNAL("fork")
/// int fprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(fprintf)
TLI_DEFINE_STRING_INTERNAL("fprintf")
diff --git a/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h b/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 59657cca40f5..223175d17c2d 100644
--- a/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -289,7 +289,7 @@ public:
/// Returns whether V is a source of divergence.
///
/// This function provides the target-dependent information for
- /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
+ /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis first
/// builds the dependency graph, and then runs the reachability algorithm
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
@@ -581,12 +581,21 @@ public:
struct MemCmpExpansionOptions {
// The list of available load sizes (in bytes), sorted in decreasing order.
SmallVector<unsigned, 8> LoadSizes;
+ // Set to true to allow overlapping loads. For example, 7-byte compares can
+ // be done with two 4-byte compares instead of 4+2+1-byte compares. This
+ // requires all loads in LoadSizes to be doable in an unaligned way.
+ bool AllowOverlappingLoads = false;
};
const MemCmpExpansionOptions *enableMemCmpExpansion(bool IsZeroCmp) const;
/// Enable matching of interleaved access groups.
bool enableInterleavedAccessVectorization() const;
+ /// Enable matching of interleaved access groups that contain predicated
+ /// accesses or gaps and therefore vectorized using masked
+ /// vector loads/stores.
+ bool enableMaskedInterleavedAccessVectorization() const;
+
/// Indicate that it is potentially unsafe to automatically vectorize
/// floating-point operations because the semantics of vector and scalar
/// floating-point semantics may differ. For example, ARM NEON v7 SIMD math
@@ -739,6 +748,10 @@ public:
/// and the number of execution units in the CPU.
unsigned getMaxInterleaveFactor(unsigned VF) const;
+ /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
+ static OperandValueKind getOperandInfo(Value *V,
+ OperandValueProperties &OpProps);
+
/// This is an approximation of reciprocal throughput of a math/logic op.
/// A higher cost indicates less expected throughput.
/// From Agner Fog's guides, reciprocal throughput is "the average number of
@@ -762,7 +775,9 @@ public:
/// \return The cost of a shuffle instruction of kind Kind and of type Tp.
/// The index and subtype parameters are used by the subvector insertion and
- /// extraction shuffle kinds.
+ /// extraction shuffle kinds to show the insert/extract point and the type of
+ /// the subvector being inserted/extracted.
+ /// NOTE: For subvector extractions Tp represents the source type.
int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
Type *SubTp = nullptr) const;
@@ -817,9 +832,13 @@ public:
/// load allows gaps)
/// \p Alignment is the alignment of the memory operation
/// \p AddressSpace is address space of the pointer.
+ /// \p UseMaskForCond indicates if the memory access is predicated.
+ /// \p UseMaskForGaps indicates if gaps should be masked.
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
- unsigned AddressSpace) const;
+ unsigned AddressSpace,
+ bool UseMaskForCond = false,
+ bool UseMaskForGaps = false) const;
/// Calculate the cost of performing a vector reduction.
///
@@ -915,6 +934,14 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
+ /// \returns True if the caller and callee agree on how \p Args will be passed
+ /// to the callee.
+ /// \param[out] Args The list of compatible arguments. The implementation may
+ /// filter out any incompatible args from this list.
+ bool areFunctionArgsABICompatible(const Function *Caller,
+ const Function *Callee,
+ SmallPtrSetImpl<Argument *> &Args) const;
+
/// The type of load/store indexing.
enum MemIndexedMode {
MIM_Unindexed, ///< No indexing.
@@ -1068,6 +1095,7 @@ public:
virtual const MemCmpExpansionOptions *enableMemCmpExpansion(
bool IsZeroCmp) const = 0;
virtual bool enableInterleavedAccessVectorization() = 0;
+ virtual bool enableMaskedInterleavedAccessVectorization() = 0;
virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth,
@@ -1128,7 +1156,9 @@ public:
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
- unsigned AddressSpace) = 0;
+ unsigned AddressSpace,
+ bool UseMaskForCond = false,
+ bool UseMaskForGaps = false) = 0;
virtual int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) = 0;
virtual int getMinMaxReductionCost(Type *Ty, Type *CondTy,
@@ -1157,6 +1187,9 @@ public:
unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
virtual bool areInlineCompatible(const Function *Caller,
const Function *Callee) const = 0;
+ virtual bool
+ areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
+ SmallPtrSetImpl<Argument *> &Args) const = 0;
virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
virtual bool isIndexedStoreLegal(MemIndexedMode Mode,Type *Ty) const = 0;
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
@@ -1342,6 +1375,9 @@ public:
bool enableInterleavedAccessVectorization() override {
return Impl.enableInterleavedAccessVectorization();
}
+ bool enableMaskedInterleavedAccessVectorization() override {
+ return Impl.enableMaskedInterleavedAccessVectorization();
+ }
bool isFPVectorizationPotentiallyUnsafe() override {
return Impl.isFPVectorizationPotentiallyUnsafe();
}
@@ -1467,9 +1503,11 @@ public:
}
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
- unsigned AddressSpace) override {
+ unsigned AddressSpace, bool UseMaskForCond,
+ bool UseMaskForGaps) override {
return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
- Alignment, AddressSpace);
+ Alignment, AddressSpace,
+ UseMaskForCond, UseMaskForGaps);
}
int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) override {
@@ -1530,6 +1568,11 @@ public:
const Function *Callee) const override {
return Impl.areInlineCompatible(Caller, Callee);
}
+ bool areFunctionArgsABICompatible(
+ const Function *Caller, const Function *Callee,
+ SmallPtrSetImpl<Argument *> &Args) const override {
+ return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
+ }
bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
}
diff --git a/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index d80ae1d6845d..c9a234deeb7d 100644
--- a/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -158,6 +158,9 @@ public:
case Intrinsic::dbg_label:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
+ case Intrinsic::launder_invariant_group:
+ case Intrinsic::strip_invariant_group:
+ case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::objectsize:
@@ -311,6 +314,8 @@ public:
bool enableInterleavedAccessVectorization() { return false; }
+ bool enableMaskedInterleavedAccessVectorization() { return false; }
+
bool isFPVectorizationPotentiallyUnsafe() { return false; }
bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
@@ -448,8 +453,9 @@ public:
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
- unsigned Alignment,
- unsigned AddressSpace) {
+ unsigned Alignment, unsigned AddressSpace,
+ bool UseMaskForCond = false,
+ bool UseMaskForGaps = false) {
return 1;
}
@@ -520,6 +526,14 @@ public:
Callee->getFnAttribute("target-features"));
}
+ bool areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
+ SmallPtrSetImpl<Argument *> &Args) const {
+ return (Caller->getFnAttribute("target-cpu") ==
+ Callee->getFnAttribute("target-cpu")) &&
+ (Caller->getFnAttribute("target-features") ==
+ Callee->getFnAttribute("target-features"));
+ }
+
bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
const DataLayout &DL) const {
return false;
diff --git a/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
index 7fcfdb3a817c..d2e6df22425e 100644
--- a/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -17,7 +17,7 @@
#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
@@ -43,10 +43,10 @@ public:
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
FunctionModRefBehavior getModRefBehavior(const Function *F);
- ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
- ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+ ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
private:
bool Aliases(const MDNode *A, const MDNode *B) const;
diff --git a/contrib/llvm/include/llvm/Analysis/TypeMetadataUtils.h b/contrib/llvm/include/llvm/Analysis/TypeMetadataUtils.h
index 6764563f6830..3bf9c5d20741 100644
--- a/contrib/llvm/include/llvm/Analysis/TypeMetadataUtils.h
+++ b/contrib/llvm/include/llvm/Analysis/TypeMetadataUtils.h
@@ -20,6 +20,8 @@
namespace llvm {
+class DominatorTree;
+
/// The type of CFI jumptable needed for a function.
enum CfiFunctionLinkage {
CFL_Definition = 0,
@@ -39,7 +41,8 @@ struct DevirtCallSite {
/// call sites based on the call and return them in DevirtCalls.
void findDevirtualizableCallsForTypeTest(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
- SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI);
+ SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI,
+ DominatorTree &DT);
/// Given a call to the intrinsic \@llvm.type.checked.load, find all
/// devirtualizable call sites based on the call and return them in DevirtCalls.
@@ -47,7 +50,7 @@ void findDevirtualizableCallsForTypeCheckedLoad(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
SmallVectorImpl<Instruction *> &LoadedPtrs,
SmallVectorImpl<Instruction *> &Preds, bool &HasNonCallUses,
- const CallInst *CI);
+ const CallInst *CI, DominatorTree &DT);
}
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index c1a91a8e5981..f46fdfcb608e 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -55,14 +55,16 @@ class Value;
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr);
+ OptimizationRemarkEmitter *ORE = nullptr,
+ bool UseInstrInfo = true);
/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr);
+ OptimizationRemarkEmitter *ORE = nullptr,
+ bool UseInstrInfo = true);
/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
@@ -75,7 +77,8 @@ class Value;
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
@@ -86,7 +89,8 @@ class Value;
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
@@ -99,7 +103,8 @@ class Value;
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the two given values are negation.
/// Currently can recoginze Value pair:
@@ -112,28 +117,32 @@ class Value;
unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Returns true if the given value is known be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Returns true if the given value is known be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
@@ -148,7 +157,8 @@ class Value;
const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
@@ -160,7 +170,8 @@ class Value;
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and returns the multiple in Multiple. If
@@ -194,7 +205,8 @@ class Value;
/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
- bool isKnownNeverNaN(const Value *V);
+ bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
+ unsigned Depth = 0);
/// Return true if we can prove that the specified FP value's sign bit is 0.
///
@@ -209,7 +221,8 @@ class Value;
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
- /// i16 0x1234), return null.
+ /// i16 0x1234), return null. If the value is entirely undef and padding,
+ /// return undef.
Value *isBytewiseValue(Value *V);
/// Given an aggregrate and an sequence of indices, see if the scalar value
@@ -284,10 +297,10 @@ class Value;
/// This function returns call pointer argument that is considered the same by
/// aliasing rules. You CAN'T use it to replace one value with another.
- const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS);
- inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) {
- return const_cast<Value *>(
- getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS)));
+ const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call);
+ inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call) {
+ return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
+ const_cast<const CallBase *>(Call)));
}
// {launder,strip}.invariant.group returns pointer that aliases its argument,
@@ -296,7 +309,7 @@ class Value;
// considered as capture. The arguments are not marked as returned neither,
// because it would make it useless.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
- ImmutableCallSite CS);
+ const CallBase *Call);
/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
@@ -405,18 +418,21 @@ class Value;
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
@@ -594,6 +610,12 @@ class Value;
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL, bool LHSIsTrue = true,
unsigned Depth = 0);
+
+ /// Return the boolean condition value in the context of the given instruction
+ /// if it is known based on dominating conditions.
+ Optional<bool> isImpliedByDomCondition(const Value *Cond,
+ const Instruction *ContextI,
+ const DataLayout &DL);
} // end namespace llvm
#endif // LLVM_ANALYSIS_VALUETRACKING_H
diff --git a/contrib/llvm/include/llvm/Analysis/VectorUtils.h b/contrib/llvm/include/llvm/Analysis/VectorUtils.h
index 9fde36d61091..be4d4f17b9ad 100644
--- a/contrib/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/contrib/llvm/include/llvm/Analysis/VectorUtils.h
@@ -15,6 +15,7 @@
#define LLVM_ANALYSIS_VECTORUTILS_H
#include "llvm/ADT/MapVector.h"
+#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
@@ -23,6 +24,7 @@ namespace llvm {
template <typename T> class ArrayRef;
class DemandedBits;
class GetElementPtrInst;
+template <typename InstTy> class InterleaveGroup;
class Loop;
class ScalarEvolution;
class TargetTransformInfo;
@@ -115,8 +117,24 @@ computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
DemandedBits &DB,
const TargetTransformInfo *TTI=nullptr);
+/// Compute the union of two access-group lists.
+///
+/// If the list contains just one access group, it is returned directly. If the
+/// list is empty, returns nullptr.
+MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
+
+/// Compute the access-group list of access groups that @p Inst1 and @p Inst2
+/// are both in. If either instruction does not access memory at all, it is
+/// considered to be in every list.
+///
+/// If the list contains just one access group, it is returned directly. If the
+/// list is empty, returns nullptr.
+MDNode *intersectAccessGroups(const Instruction *Inst1,
+ const Instruction *Inst2);
+
/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
-/// MD_nontemporal]. For K in Kinds, we get the MDNode for K from each of the
+/// MD_nontemporal, MD_access_group].
+/// For K in Kinds, we get the MDNode for K from each of the
/// elements of VL, compute their "intersection" (i.e., the most generic
/// metadata value that covers all of the individual values), and set I's
/// metadata for M equal to the intersection value.
@@ -124,6 +142,35 @@ computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
+/// Create a mask that filters the members of an interleave group where there
+/// are gaps.
+///
+/// For example, the mask for \p Group with interleave-factor 3
+/// and \p VF 4, that has only its first member present is:
+///
+/// <1,0,0,1,0,0,1,0,0,1,0,0>
+///
+/// Note: The result is a mask of 0's and 1's, as opposed to the other
+/// create[*]Mask() utilities which create a shuffle mask (mask that
+/// consists of indices).
+Constant *createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
+ const InterleaveGroup<Instruction> &Group);
+
+/// Create a mask with replicated elements.
+///
+/// This function creates a shuffle mask for replicating each of the \p VF
+/// elements in a vector \p ReplicationFactor times. It can be used to
+/// transform a mask of \p VF elements into a mask of
+/// \p VF * \p ReplicationFactor elements used by a predicated
+/// interleaved-group of loads/stores whose Interleaved-factor ==
+/// \p ReplicationFactor.
+///
+/// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
+///
+/// <0,0,0,1,1,1,2,2,2,3,3,3>
+Constant *createReplicatedMask(IRBuilder<> &Builder, unsigned ReplicationFactor,
+ unsigned VF);
+
/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
@@ -176,6 +223,381 @@ Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
/// elements, it will be padded with undefs.
Value *concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs);
+/// The group of interleaved loads/stores sharing the same stride and
+/// close to each other.
+///
+/// Each member in this group has an index starting from 0, and the largest
+/// index should be less than interleaved factor, which is equal to the absolute
+/// value of the access's stride.
+///
+/// E.g. An interleaved load group of factor 4:
+/// for (unsigned i = 0; i < 1024; i+=4) {
+/// a = A[i]; // Member of index 0
+/// b = A[i+1]; // Member of index 1
+/// d = A[i+3]; // Member of index 3
+/// ...
+/// }
+///
+/// An interleaved store group of factor 4:
+/// for (unsigned i = 0; i < 1024; i+=4) {
+/// ...
+/// A[i] = a; // Member of index 0
+/// A[i+1] = b; // Member of index 1
+/// A[i+2] = c; // Member of index 2
+/// A[i+3] = d; // Member of index 3
+/// }
+///
+/// Note: the interleaved load group could have gaps (missing members), but
+/// the interleaved store group doesn't allow gaps.
+template <typename InstTy> class InterleaveGroup {
+public:
+ InterleaveGroup(unsigned Factor, bool Reverse, unsigned Align)
+ : Factor(Factor), Reverse(Reverse), Align(Align), InsertPos(nullptr) {}
+
+ InterleaveGroup(InstTy *Instr, int Stride, unsigned Align)
+ : Align(Align), InsertPos(Instr) {
+ assert(Align && "The alignment should be non-zero");
+
+ Factor = std::abs(Stride);
+ assert(Factor > 1 && "Invalid interleave factor");
+
+ Reverse = Stride < 0;
+ Members[0] = Instr;
+ }
+
+ bool isReverse() const { return Reverse; }
+ unsigned getFactor() const { return Factor; }
+ unsigned getAlignment() const { return Align; }
+ unsigned getNumMembers() const { return Members.size(); }
+
+ /// Try to insert a new member \p Instr with index \p Index and
+ /// alignment \p NewAlign. The index is relative to the leader and may be
+ /// negative if the new member becomes the leader.
+ ///
+ /// \returns false if the instruction doesn't belong to the group.
+ bool insertMember(InstTy *Instr, int Index, unsigned NewAlign) {
+ assert(NewAlign && "The new member's alignment should be non-zero");
+
+ int Key = Index + SmallestKey;
+
+ // Skip if there is already a member with the same index.
+ if (Members.find(Key) != Members.end())
+ return false;
+
+ if (Key > LargestKey) {
+ // The largest index is always less than the interleave factor.
+ if (Index >= static_cast<int>(Factor))
+ return false;
+
+ LargestKey = Key;
+ } else if (Key < SmallestKey) {
+ // The gap between the new smallest index and the largest index must
+ // remain less than the interleave factor.
+ if (LargestKey - Key >= static_cast<int>(Factor))
+ return false;
+
+ SmallestKey = Key;
+ }
+
+ // It's always safe to select the minimum alignment.
+ Align = std::min(Align, NewAlign);
+ Members[Key] = Instr;
+ return true;
+ }
+
+ /// Get the member with the given index \p Index
+ ///
+ /// \returns nullptr if contains no such member.
+ InstTy *getMember(unsigned Index) const {
+ int Key = SmallestKey + Index;
+ auto Member = Members.find(Key);
+ if (Member == Members.end())
+ return nullptr;
+
+ return Member->second;
+ }
+
+ /// Get the index for the given member. Unlike the key in the member
+ /// map, the index starts from 0.
+ unsigned getIndex(const InstTy *Instr) const {
+ for (auto I : Members) {
+ if (I.second == Instr)
+ return I.first - SmallestKey;
+ }
+
+ llvm_unreachable("InterleaveGroup contains no such member");
+ }
+
+ InstTy *getInsertPos() const { return InsertPos; }
+ void setInsertPos(InstTy *Inst) { InsertPos = Inst; }
+
+ /// Add metadata (e.g. alias info) from the instructions in this group to \p
+ /// NewInst.
+ ///
+ /// FIXME: this function currently does not add noalias metadata the way
+ /// addNewMetadata does. To do that we need to compute the intersection of
+ /// the noalias info from all members.
+ void addMetadata(InstTy *NewInst) const;
+
+ /// Returns true if this Group requires a scalar iteration to handle gaps.
+ bool requiresScalarEpilogue() const {
+ // If the last member of the Group exists, then a scalar epilog is not
+ // needed for this group.
+ if (getMember(getFactor() - 1))
+ return false;
+
+ // We have a group with gaps. It therefore cannot be a group of stores,
+ // and it can't be a reversed access, because such groups get invalidated.
+ assert(!getMember(0)->mayWriteToMemory() &&
+ "Group should have been invalidated");
+ assert(!isReverse() && "Group should have been invalidated");
+
+ // This is a group of loads, with gaps, and without a last member.
+ return true;
+ }
+
+private:
+ unsigned Factor; // Interleave Factor.
+ bool Reverse;
+ unsigned Align;
+ DenseMap<int, InstTy *> Members;
+ int SmallestKey = 0;
+ int LargestKey = 0;
+
+ // To avoid breaking dependences, vectorized instructions of an interleave
+ // group should be inserted at either the first load or the last store in
+ // program order.
+ //
+ // E.g. %even = load i32 // Insert Position
+ // %add = add i32 %even // Use of %even
+ // %odd = load i32
+ //
+ // store i32 %even
+ // %odd = add i32 // Def of %odd
+ // store i32 %odd // Insert Position
+ InstTy *InsertPos;
+};
+
+/// Drive the analysis of interleaved memory accesses in the loop.
+///
+/// Use this class to analyze interleaved accesses only when we can vectorize
+/// a loop. Otherwise it's meaningless to do analysis as the vectorization
+/// on interleaved accesses is unsafe.
+///
+/// The analysis collects interleave groups and records the relationships
+/// between the member and the group in a map.
+class InterleavedAccessInfo {
+public:
+ InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
+ DominatorTree *DT, LoopInfo *LI,
+ const LoopAccessInfo *LAI)
+ : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}
+
+ ~InterleavedAccessInfo() { reset(); }
+
+ /// Analyze the interleaved accesses and collect them in interleave
+ /// groups. Substitute symbolic strides using \p Strides.
+ /// Consider also predicated loads/stores in the analysis if
+ /// \p EnableMaskedInterleavedGroup is true.
+ void analyzeInterleaving(bool EnableMaskedInterleavedGroup);
+
+ /// Invalidate groups, e.g., in case all blocks in loop will be predicated
+ /// contrary to original assumption. Although we currently prevent group
+ /// formation for predicated accesses, we may be able to relax this limitation
+ /// in the future once we handle more complicated blocks.
+ void reset() {
+ SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
+ // Avoid releasing a pointer twice.
+ for (auto &I : InterleaveGroupMap)
+ DelSet.insert(I.second);
+ for (auto *Ptr : DelSet)
+ delete Ptr;
+ InterleaveGroupMap.clear();
+ RequiresScalarEpilogue = false;
+ }
+
+
+ /// Check if \p Instr belongs to any interleave group.
+ bool isInterleaved(Instruction *Instr) const {
+ return InterleaveGroupMap.find(Instr) != InterleaveGroupMap.end();
+ }
+
+ /// Get the interleave group that \p Instr belongs to.
+ ///
+ /// \returns nullptr if doesn't have such group.
+ InterleaveGroup<Instruction> *
+ getInterleaveGroup(const Instruction *Instr) const {
+ if (InterleaveGroupMap.count(Instr))
+ return InterleaveGroupMap.find(Instr)->second;
+ return nullptr;
+ }
+
+ iterator_range<SmallPtrSetIterator<llvm::InterleaveGroup<Instruction> *>>
+ getInterleaveGroups() {
+ return make_range(InterleaveGroups.begin(), InterleaveGroups.end());
+ }
+
+ /// Returns true if an interleaved group that may access memory
+ /// out-of-bounds requires a scalar epilogue iteration for correctness.
+ bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
+
+ /// Invalidate groups that require a scalar epilogue (due to gaps). This can
+ /// happen when optimizing for size forbids a scalar epilogue, and the gap
+ /// cannot be filtered by masking the load/store.
+ void invalidateGroupsRequiringScalarEpilogue();
+
+private:
+ /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
+ /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
+ /// The interleaved access analysis can also add new predicates (for example
+ /// by versioning strides of pointers).
+ PredicatedScalarEvolution &PSE;
+
+ Loop *TheLoop;
+ DominatorTree *DT;
+ LoopInfo *LI;
+ const LoopAccessInfo *LAI;
+
+ /// True if the loop may contain non-reversed interleaved groups with
+ /// out-of-bounds accesses. We ensure we don't speculatively access memory
+ /// out-of-bounds by executing at least one scalar epilogue iteration.
+ bool RequiresScalarEpilogue = false;
+
+ /// Holds the relationships between the members and the interleave group.
+ DenseMap<Instruction *, InterleaveGroup<Instruction> *> InterleaveGroupMap;
+
+ SmallPtrSet<InterleaveGroup<Instruction> *, 4> InterleaveGroups;
+
+ /// Holds dependences among the memory accesses in the loop. It maps a source
+ /// access to a set of dependent sink accesses.
+ DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
+
+ /// The descriptor for a strided memory access.
+ struct StrideDescriptor {
+ StrideDescriptor() = default;
+ StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
+ unsigned Align)
+ : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
+
+ // The access's stride. It is negative for a reverse access.
+ int64_t Stride = 0;
+
+ // The scalar expression of this access.
+ const SCEV *Scev = nullptr;
+
+ // The size of the memory object.
+ uint64_t Size = 0;
+
+ // The alignment of this access.
+ unsigned Align = 0;
+ };
+
+ /// A type for holding instructions and their stride descriptors.
+ using StrideEntry = std::pair<Instruction *, StrideDescriptor>;
+
+ /// Create a new interleave group with the given instruction \p Instr,
+ /// stride \p Stride and alignment \p Align.
+ ///
+ /// \returns the newly created interleave group.
+ InterleaveGroup<Instruction> *
+ createInterleaveGroup(Instruction *Instr, int Stride, unsigned Align) {
+ assert(!InterleaveGroupMap.count(Instr) &&
+ "Already in an interleaved access group");
+ InterleaveGroupMap[Instr] =
+ new InterleaveGroup<Instruction>(Instr, Stride, Align);
+ InterleaveGroups.insert(InterleaveGroupMap[Instr]);
+ return InterleaveGroupMap[Instr];
+ }
+
+ /// Release the group and remove all the relationships.
+ void releaseGroup(InterleaveGroup<Instruction> *Group) {
+ for (unsigned i = 0; i < Group->getFactor(); i++)
+ if (Instruction *Member = Group->getMember(i))
+ InterleaveGroupMap.erase(Member);
+
+ InterleaveGroups.erase(Group);
+ delete Group;
+ }
+
+ /// Collect all the accesses with a constant stride in program order.
+ void collectConstStrideAccesses(
+ MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
+ const ValueToValueMap &Strides);
+
+ /// Returns true if \p Stride is allowed in an interleaved group.
+ static bool isStrided(int Stride);
+
+ /// Returns true if \p BB is a predicated block.
+ bool isPredicated(BasicBlock *BB) const {
+ return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
+ }
+
+ /// Returns true if LoopAccessInfo can be used for dependence queries.
+ bool areDependencesValid() const {
+ return LAI && LAI->getDepChecker().getDependences();
+ }
+
+ /// Returns true if memory accesses \p A and \p B can be reordered, if
+ /// necessary, when constructing interleaved groups.
+ ///
+ /// \p A must precede \p B in program order. We return false if reordering is
+ /// not necessary or is prevented because \p A and \p B may be dependent.
+ bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
+ StrideEntry *B) const {
+ // Code motion for interleaved accesses can potentially hoist strided loads
+ // and sink strided stores. The code below checks the legality of the
+ // following two conditions:
+ //
+ // 1. Potentially moving a strided load (B) before any store (A) that
+ // precedes B, or
+ //
+ // 2. Potentially moving a strided store (A) after any load or store (B)
+ // that A precedes.
+ //
+ // It's legal to reorder A and B if we know there isn't a dependence from A
+ // to B. Note that this determination is conservative since some
+ // dependences could potentially be reordered safely.
+
+ // A is potentially the source of a dependence.
+ auto *Src = A->first;
+ auto SrcDes = A->second;
+
+ // B is potentially the sink of a dependence.
+ auto *Sink = B->first;
+ auto SinkDes = B->second;
+
+ // Code motion for interleaved accesses can't violate WAR dependences.
+ // Thus, reordering is legal if the source isn't a write.
+ if (!Src->mayWriteToMemory())
+ return true;
+
+ // At least one of the accesses must be strided.
+ if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
+ return true;
+
+ // If dependence information is not available from LoopAccessInfo,
+ // conservatively assume the instructions can't be reordered.
+ if (!areDependencesValid())
+ return false;
+
+ // If we know there is a dependence from source to sink, assume the
+ // instructions can't be reordered. Otherwise, reordering is legal.
+ return Dependences.find(Src) == Dependences.end() ||
+ !Dependences.lookup(Src).count(Sink);
+ }
+
+ /// Collect the dependences from LoopAccessInfo.
+ ///
+ /// We process the dependences once during the interleaved access analysis to
+ /// enable constant-time dependence queries.
+ void collectDependences() {
+ if (!areDependencesValid())
+ return;
+ auto *Deps = LAI->getDepChecker().getDependences();
+ for (auto Dep : *Deps)
+ Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
+ }
+};
+
} // llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h b/contrib/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h
new file mode 100644
index 000000000000..de44f41720ed
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h
@@ -0,0 +1,70 @@
+//===- AMDGPUMetadataVerifier.h - MsgPack Types -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This is a verifier for AMDGPU HSA metadata, which can verify both
+/// well-typed metadata and untyped metadata. When verifying in the non-strict
+/// mode, untyped metadata is coerced into the correct type if possible.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
+#define LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
+
+#include "llvm/BinaryFormat/MsgPackTypes.h"
+
+namespace llvm {
+namespace AMDGPU {
+namespace HSAMD {
+namespace V3 {
+
+/// Verifier for AMDGPU HSA metadata.
+///
+/// Operates in two modes:
+///
+/// In strict mode, metadata must already be well-typed.
+///
+/// In non-strict mode, metadata is coerced into expected types when possible.
+class MetadataVerifier {
+ bool Strict;
+
+ bool verifyScalar(msgpack::Node &Node, msgpack::ScalarNode::ScalarKind SKind,
+ function_ref<bool(msgpack::ScalarNode &)> verifyValue = {});
+ bool verifyInteger(msgpack::Node &Node);
+ bool verifyArray(msgpack::Node &Node,
+ function_ref<bool(msgpack::Node &)> verifyNode,
+ Optional<size_t> Size = None);
+ bool verifyEntry(msgpack::MapNode &MapNode, StringRef Key, bool Required,
+ function_ref<bool(msgpack::Node &)> verifyNode);
+ bool
+ verifyScalarEntry(msgpack::MapNode &MapNode, StringRef Key, bool Required,
+ msgpack::ScalarNode::ScalarKind SKind,
+ function_ref<bool(msgpack::ScalarNode &)> verifyValue = {});
+ bool verifyIntegerEntry(msgpack::MapNode &MapNode, StringRef Key,
+ bool Required);
+ bool verifyKernelArgs(msgpack::Node &Node);
+ bool verifyKernel(msgpack::Node &Node);
+
+public:
+ /// Construct a MetadataVerifier, specifying whether it will operate in \p
+ /// Strict mode.
+ MetadataVerifier(bool Strict) : Strict(Strict) {}
+
+ /// Verify given HSA metadata.
+ ///
+ /// \returns True when successful, false when metadata is invalid.
+ bool verify(msgpack::Node &HSAMetadataRoot);
+};
+
+} // end namespace V3
+} // end namespace HSAMD
+} // end namespace AMDGPU
+} // end namespace llvm
+
+#endif // LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
diff --git a/contrib/llvm/include/llvm/BinaryFormat/Dwarf.def b/contrib/llvm/include/llvm/BinaryFormat/Dwarf.def
index 944c5dd1c157..6ad3cb57f62f 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/Dwarf.def
+++ b/contrib/llvm/include/llvm/BinaryFormat/Dwarf.def
@@ -18,9 +18,11 @@
defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED || \
defined HANDLE_DW_CC || defined HANDLE_DW_LNS || defined HANDLE_DW_LNE || \
defined HANDLE_DW_LNCT || defined HANDLE_DW_MACRO || \
- defined HANDLE_DW_RLE || defined HANDLE_DW_CFA || \
+ defined HANDLE_DW_RLE || \
+ (defined HANDLE_DW_CFA && defined HANDLE_DW_CFA_PRED) || \
defined HANDLE_DW_APPLE_PROPERTY || defined HANDLE_DW_UT || \
- defined HANDLE_DWARF_SECTION || defined HANDLE_DW_IDX)
+ defined HANDLE_DWARF_SECTION || defined HANDLE_DW_IDX || \
+ defined HANDLE_DW_END)
#error "Missing macro definition of HANDLE_DW*"
#endif
@@ -41,7 +43,7 @@
#endif
#ifndef HANDLE_DW_LANG
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_ATE
@@ -84,6 +86,10 @@
#define HANDLE_DW_CFA(ID, NAME)
#endif
+#ifndef HANDLE_DW_CFA_PRED
+#define HANDLE_DW_CFA_PRED(ID, NAME, PRED)
+#endif
+
#ifndef HANDLE_DW_APPLE_PROPERTY
#define HANDLE_DW_APPLE_PROPERTY(ID, NAME)
#endif
@@ -100,6 +106,10 @@
#define HANDLE_DW_IDX(ID, NAME)
#endif
+#ifndef HANDLE_DW_END
+#define HANDLE_DW_END(ID, NAME)
+#endif
+
HANDLE_DW_TAG(0x0000, null, 2, DWARF)
HANDLE_DW_TAG(0x0001, array_type, 2, DWARF)
HANDLE_DW_TAG(0x0002, class_type, 2, DWARF)
@@ -622,50 +632,50 @@ HANDLE_DW_OP(0xfb, GNU_addr_index, 0, GNU)
HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU)
// DWARF languages.
-HANDLE_DW_LANG(0x0001, C89, 2, DWARF)
-HANDLE_DW_LANG(0x0002, C, 2, DWARF)
-HANDLE_DW_LANG(0x0003, Ada83, 2, DWARF)
-HANDLE_DW_LANG(0x0004, C_plus_plus, 2, DWARF)
-HANDLE_DW_LANG(0x0005, Cobol74, 2, DWARF)
-HANDLE_DW_LANG(0x0006, Cobol85, 2, DWARF)
-HANDLE_DW_LANG(0x0007, Fortran77, 2, DWARF)
-HANDLE_DW_LANG(0x0008, Fortran90, 2, DWARF)
-HANDLE_DW_LANG(0x0009, Pascal83, 2, DWARF)
-HANDLE_DW_LANG(0x000a, Modula2, 2, DWARF)
+HANDLE_DW_LANG(0x0001, C89, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0002, C, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0003, Ada83, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0004, C_plus_plus, 0, 2, DWARF)
+HANDLE_DW_LANG(0x0005, Cobol74, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0006, Cobol85, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0007, Fortran77, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0008, Fortran90, 1, 2, DWARF)
+HANDLE_DW_LANG(0x0009, Pascal83, 1, 2, DWARF)
+HANDLE_DW_LANG(0x000a, Modula2, 1, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_LANG(0x000b, Java, 3, DWARF)
-HANDLE_DW_LANG(0x000c, C99, 3, DWARF)
-HANDLE_DW_LANG(0x000d, Ada95, 3, DWARF)
-HANDLE_DW_LANG(0x000e, Fortran95, 3, DWARF)
-HANDLE_DW_LANG(0x000f, PLI, 3, DWARF)
-HANDLE_DW_LANG(0x0010, ObjC, 3, DWARF)
-HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 3, DWARF)
-HANDLE_DW_LANG(0x0012, UPC, 3, DWARF)
-HANDLE_DW_LANG(0x0013, D, 3, DWARF)
+HANDLE_DW_LANG(0x000b, Java, 0, 3, DWARF)
+HANDLE_DW_LANG(0x000c, C99, 0, 3, DWARF)
+HANDLE_DW_LANG(0x000d, Ada95, 1, 3, DWARF)
+HANDLE_DW_LANG(0x000e, Fortran95, 1, 3, DWARF)
+HANDLE_DW_LANG(0x000f, PLI, 1, 3, DWARF)
+HANDLE_DW_LANG(0x0010, ObjC, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0012, UPC, 0, 3, DWARF)
+HANDLE_DW_LANG(0x0013, D, 0, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_LANG(0x0014, Python, 4, DWARF)
+HANDLE_DW_LANG(0x0014, Python, 0, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_LANG(0x0015, OpenCL, 5, DWARF)
-HANDLE_DW_LANG(0x0016, Go, 5, DWARF)
-HANDLE_DW_LANG(0x0017, Modula3, 5, DWARF)
-HANDLE_DW_LANG(0x0018, Haskell, 5, DWARF)
-HANDLE_DW_LANG(0x0019, C_plus_plus_03, 5, DWARF)
-HANDLE_DW_LANG(0x001a, C_plus_plus_11, 5, DWARF)
-HANDLE_DW_LANG(0x001b, OCaml, 5, DWARF)
-HANDLE_DW_LANG(0x001c, Rust, 5, DWARF)
-HANDLE_DW_LANG(0x001d, C11, 5, DWARF)
-HANDLE_DW_LANG(0x001e, Swift, 5, DWARF)
-HANDLE_DW_LANG(0x001f, Julia, 5, DWARF)
-HANDLE_DW_LANG(0x0020, Dylan, 5, DWARF)
-HANDLE_DW_LANG(0x0021, C_plus_plus_14, 5, DWARF)
-HANDLE_DW_LANG(0x0022, Fortran03, 5, DWARF)
-HANDLE_DW_LANG(0x0023, Fortran08, 5, DWARF)
-HANDLE_DW_LANG(0x0024, RenderScript, 5, DWARF)
-HANDLE_DW_LANG(0x0025, BLISS, 5, DWARF)
+HANDLE_DW_LANG(0x0015, OpenCL, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0016, Go, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0017, Modula3, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0018, Haskell, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0019, C_plus_plus_03, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001a, C_plus_plus_11, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001b, OCaml, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001c, Rust, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001d, C11, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001e, Swift, 0, 5, DWARF)
+HANDLE_DW_LANG(0x001f, Julia, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0020, Dylan, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0021, C_plus_plus_14, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0022, Fortran03, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0023, Fortran08, 1, 5, DWARF)
+HANDLE_DW_LANG(0x0024, RenderScript, 0, 5, DWARF)
+HANDLE_DW_LANG(0x0025, BLISS, 0, 5, DWARF)
// Vendor extensions:
-HANDLE_DW_LANG(0x8001, Mips_Assembler, 0, MIPS)
-HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, GOOGLE)
-HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, BORLAND)
+HANDLE_DW_LANG(0x8001, Mips_Assembler, None, 0, MIPS)
+HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, 0, GOOGLE)
+HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, 0, BORLAND)
// DWARF attribute type encodings.
HANDLE_DW_ATE(0x01, address, 2, DWARF)
@@ -690,6 +700,11 @@ HANDLE_DW_ATE(0x10, UTF, 4, DWARF)
HANDLE_DW_ATE(0x11, UCS, 5, DWARF)
HANDLE_DW_ATE(0x12, ASCII, 5, DWARF)
+// DWARF attribute endianity
+HANDLE_DW_END(0x00, default)
+HANDLE_DW_END(0x01, big)
+HANDLE_DW_END(0x02, little)
+
// DWARF virtuality codes.
HANDLE_DW_VIRTUALITY(0x00, none)
HANDLE_DW_VIRTUALITY(0x01, virtual)
@@ -821,9 +836,10 @@ HANDLE_DW_CFA(0x14, val_offset)
HANDLE_DW_CFA(0x15, val_offset_sf)
HANDLE_DW_CFA(0x16, val_expression)
// Vendor extensions:
-HANDLE_DW_CFA(0x1d, MIPS_advance_loc8)
-HANDLE_DW_CFA(0x2d, GNU_window_save)
-HANDLE_DW_CFA(0x2e, GNU_args_size)
+HANDLE_DW_CFA_PRED(0x1d, MIPS_advance_loc8, SELECT_MIPS64)
+HANDLE_DW_CFA_PRED(0x2d, GNU_window_save, SELECT_SPARC)
+HANDLE_DW_CFA_PRED(0x2d, AARCH64_negate_ra_state, SELECT_AARCH64)
+HANDLE_DW_CFA_PRED(0x2e, GNU_args_size, SELECT_X86)
// Apple Objective-C Property Attributes.
// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
@@ -863,6 +879,7 @@ HANDLE_DWARF_SECTION(DebugTypes, ".debug_types", "debug-types")
HANDLE_DWARF_SECTION(DebugLine, ".debug_line", "debug-line")
HANDLE_DWARF_SECTION(DebugLineStr, ".debug_line_str", "debug-line-str")
HANDLE_DWARF_SECTION(DebugLoc, ".debug_loc", "debug-loc")
+HANDLE_DWARF_SECTION(DebugLoclists, ".debug_loclists", "debug-loclists")
HANDLE_DWARF_SECTION(DebugFrame, ".debug_frame", "debug-frame")
HANDLE_DWARF_SECTION(DebugMacro, ".debug_macro", "debug-macro")
HANDLE_DWARF_SECTION(DebugNames, ".debug_names", "debug-names")
@@ -905,7 +922,9 @@ HANDLE_DW_IDX(0x05, type_hash)
#undef HANDLE_DW_MACRO
#undef HANDLE_DW_RLE
#undef HANDLE_DW_CFA
+#undef HANDLE_DW_CFA_PRED
#undef HANDLE_DW_APPLE_PROPERTY
#undef HANDLE_DW_UT
#undef HANDLE_DWARF_SECTION
#undef HANDLE_DW_IDX
+#undef HANDLE_DW_END
diff --git a/contrib/llvm/include/llvm/BinaryFormat/Dwarf.h b/contrib/llvm/include/llvm/BinaryFormat/Dwarf.h
index 9036f405eaea..525a04d5e6cf 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/Dwarf.h
+++ b/contrib/llvm/include/llvm/BinaryFormat/Dwarf.h
@@ -26,6 +26,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/ADT/Triple.h"
namespace llvm {
class StringRef;
@@ -150,9 +151,8 @@ enum DecimalSignEncoding {
enum EndianityEncoding {
// Endianity attribute values
- DW_END_default = 0x00,
- DW_END_big = 0x01,
- DW_END_little = 0x02,
+#define HANDLE_DW_END(ID, NAME) DW_END_##NAME = ID,
+#include "llvm/BinaryFormat/Dwarf.def"
DW_END_lo_user = 0x40,
DW_END_hi_user = 0xff
};
@@ -184,7 +184,8 @@ enum DefaultedMemberAttribute {
};
enum SourceLanguage {
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) DW_LANG_##NAME = ID,
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR) \
+ DW_LANG_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
DW_LANG_lo_user = 0x8000,
DW_LANG_hi_user = 0xffff
@@ -273,6 +274,7 @@ enum RangeListEntries {
/// Call frame instruction encodings.
enum CallFrameInfo {
#define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID,
+#define HANDLE_DW_CFA_PRED(ID, NAME, ARCH) DW_CFA_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
DW_CFA_extended = 0x00,
@@ -431,7 +433,7 @@ StringRef LNStandardString(unsigned Standard);
StringRef LNExtendedString(unsigned Encoding);
StringRef MacinfoString(unsigned Encoding);
StringRef RangeListEncodingString(unsigned Encoding);
-StringRef CallFrameString(unsigned Encoding);
+StringRef CallFrameString(unsigned Encoding, Triple::ArchType Arch);
StringRef ApplePropertyString(unsigned);
StringRef UnitTypeString(unsigned);
StringRef AtomTypeString(unsigned Atom);
@@ -489,6 +491,8 @@ unsigned AttributeEncodingVendor(TypeKind E);
unsigned LanguageVendor(SourceLanguage L);
/// @}
+Optional<unsigned> LanguageLowerBound(SourceLanguage L);
+
/// A helper struct providing information about the byte size of DW_FORM
/// values that vary in size depending on the DWARF version, address byte
/// size, or DWARF32/DWARF64.
diff --git a/contrib/llvm/include/llvm/BinaryFormat/ELF.h b/contrib/llvm/include/llvm/BinaryFormat/ELF.h
index 2e778779117b..ce35d127d433 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/contrib/llvm/include/llvm/BinaryFormat/ELF.h
@@ -582,6 +582,7 @@ enum {
EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60
EF_HEXAGON_MACH_V62 = 0x00000062, // Hexagon V62
EF_HEXAGON_MACH_V65 = 0x00000065, // Hexagon V65
+ EF_HEXAGON_MACH_V66 = 0x00000066, // Hexagon V66
// Highest ISA version flags
EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
@@ -594,6 +595,7 @@ enum {
EF_HEXAGON_ISA_V60 = 0x00000060, // Hexagon V60 ISA
EF_HEXAGON_ISA_V62 = 0x00000062, // Hexagon V62 ISA
EF_HEXAGON_ISA_V65 = 0x00000065, // Hexagon V65 ISA
+ EF_HEXAGON_ISA_V66 = 0x00000066, // Hexagon V66 ISA
};
// Hexagon-specific section indexes for common small data
@@ -701,6 +703,7 @@ enum : unsigned {
EF_AMDGPU_MACH_AMDGCN_GFX902 = 0x02d,
EF_AMDGPU_MACH_AMDGCN_GFX904 = 0x02e,
EF_AMDGPU_MACH_AMDGCN_GFX906 = 0x02f,
+ EF_AMDGPU_MACH_AMDGCN_GFX909 = 0x031,
// Reserved for AMDGCN-based processors.
EF_AMDGPU_MACH_AMDGCN_RESERVED0 = 0x027,
@@ -708,11 +711,14 @@ enum : unsigned {
// First/last AMDGCN-based processors.
EF_AMDGPU_MACH_AMDGCN_FIRST = EF_AMDGPU_MACH_AMDGCN_GFX600,
- EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_GFX906,
+ EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_GFX909,
- // Indicates if the xnack target feature is enabled for all code contained in
- // the object.
+ // Indicates if the "xnack" target feature is enabled for all code contained
+ // in the object.
EF_AMDGPU_XNACK = 0x100,
+ // Indicates if the "sram-ecc" target feature is enabled for all code
+ // contained in the object.
+ EF_AMDGPU_SRAM_ECC = 0x200,
};
// ELF Relocation types for AMDGPU
@@ -725,6 +731,38 @@ enum {
#include "ELFRelocs/BPF.def"
};
+// MSP430 specific e_flags
+enum : unsigned {
+ EF_MSP430_MACH_MSP430x11 = 11,
+ EF_MSP430_MACH_MSP430x11x1 = 110,
+ EF_MSP430_MACH_MSP430x12 = 12,
+ EF_MSP430_MACH_MSP430x13 = 13,
+ EF_MSP430_MACH_MSP430x14 = 14,
+ EF_MSP430_MACH_MSP430x15 = 15,
+ EF_MSP430_MACH_MSP430x16 = 16,
+ EF_MSP430_MACH_MSP430x20 = 20,
+ EF_MSP430_MACH_MSP430x22 = 22,
+ EF_MSP430_MACH_MSP430x23 = 23,
+ EF_MSP430_MACH_MSP430x24 = 24,
+ EF_MSP430_MACH_MSP430x26 = 26,
+ EF_MSP430_MACH_MSP430x31 = 31,
+ EF_MSP430_MACH_MSP430x32 = 32,
+ EF_MSP430_MACH_MSP430x33 = 33,
+ EF_MSP430_MACH_MSP430x41 = 41,
+ EF_MSP430_MACH_MSP430x42 = 42,
+ EF_MSP430_MACH_MSP430x43 = 43,
+ EF_MSP430_MACH_MSP430x44 = 44,
+ EF_MSP430_MACH_MSP430X = 45,
+ EF_MSP430_MACH_MSP430x46 = 46,
+ EF_MSP430_MACH_MSP430x47 = 47,
+ EF_MSP430_MACH_MSP430x54 = 54,
+};
+
+// ELF Relocation types for MSP430
+enum {
+#include "ELFRelocs/MSP430.def"
+};
+
#undef ELF_RELOC
// Section header.
@@ -829,6 +867,8 @@ enum : unsigned {
SHT_MIPS_DWARF = 0x7000001e, // DWARF debugging section.
SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.
+ SHT_MSP430_ATTRIBUTES = 0x70000003U,
+
SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
@@ -1321,7 +1361,7 @@ enum {
GNU_PROPERTY_X86_FEATURE_1_SHSTK = 1 << 1
};
-// AMDGPU specific notes.
+// AMD specific notes. (Code Object V2)
enum {
// Note types with values between 0 and 9 (inclusive) are reserved.
NT_AMD_AMDGPU_HSA_METADATA = 10,
@@ -1329,6 +1369,12 @@ enum {
NT_AMD_AMDGPU_PAL_METADATA = 12
};
+// AMDGPU specific notes. (Code Object V3)
+enum {
+ // Note types with values between 0 and 31 (inclusive) are reserved.
+ NT_AMDGPU_METADATA = 32
+};
+
enum {
GNU_ABI_TAG_LINUX = 0,
GNU_ABI_TAG_HURD = 1,
@@ -1339,6 +1385,8 @@ enum {
GNU_ABI_TAG_NACL = 6,
};
+constexpr const char *ELF_NOTE_GNU = "GNU";
+
// Android packed relocation group flags.
enum {
RELOCATION_GROUPED_BY_INFO_FLAG = 1,
diff --git a/contrib/llvm/include/llvm/BinaryFormat/ELFRelocs/MSP430.def b/contrib/llvm/include/llvm/BinaryFormat/ELFRelocs/MSP430.def
new file mode 100644
index 000000000000..96990abf2db4
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/ELFRelocs/MSP430.def
@@ -0,0 +1,16 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_MSP430_NONE, 0)
+ELF_RELOC(R_MSP430_32, 1)
+ELF_RELOC(R_MSP430_10_PCREL, 2)
+ELF_RELOC(R_MSP430_16, 3)
+ELF_RELOC(R_MSP430_16_PCREL, 4)
+ELF_RELOC(R_MSP430_16_BYTE, 5)
+ELF_RELOC(R_MSP430_16_PCREL_BYTE, 6)
+ELF_RELOC(R_MSP430_2X_PCREL, 7)
+ELF_RELOC(R_MSP430_RL_PCREL, 8)
+ELF_RELOC(R_MSP430_8, 9)
+ELF_RELOC(R_MSP430_SYM_DIFF, 10)
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MachO.h b/contrib/llvm/include/llvm/BinaryFormat/MachO.h
index c5294c76ebf7..b3d60984249f 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/MachO.h
+++ b/contrib/llvm/include/llvm/BinaryFormat/MachO.h
@@ -486,7 +486,10 @@ enum PlatformType {
PLATFORM_IOS = 2,
PLATFORM_TVOS = 3,
PLATFORM_WATCHOS = 4,
- PLATFORM_BRIDGEOS = 5
+ PLATFORM_BRIDGEOS = 5,
+ PLATFORM_IOSSIMULATOR = 7,
+ PLATFORM_TVOSSIMULATOR = 8,
+ PLATFORM_WATCHOSSIMULATOR = 9
};
// Values for tools enum in build_tool_version.
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MsgPack.def b/contrib/llvm/include/llvm/BinaryFormat/MsgPack.def
new file mode 100644
index 000000000000..781b49f46aeb
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/MsgPack.def
@@ -0,0 +1,108 @@
+//===- MsgPack.def - MessagePack definitions --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Macros for running through MessagePack enumerators.
+///
+//===----------------------------------------------------------------------===//
+
+#if !( \
+ defined HANDLE_MP_FIRST_BYTE || defined HANDLE_MP_FIX_BITS || \
+ defined HANDLE_MP_FIX_BITS_MASK || defined HANDLE_MP_FIX_MAX || \
+ defined HANDLE_MP_FIX_LEN || defined HANDLE_MP_FIX_MIN)
+#error "Missing macro definition of HANDLE_MP*"
+#endif
+
+#ifndef HANDLE_MP_FIRST_BYTE
+#define HANDLE_MP_FIRST_BYTE(ID, NAME)
+#endif
+
+#ifndef HANDLE_MP_FIX_BITS
+#define HANDLE_MP_FIX_BITS(ID, NAME)
+#endif
+
+#ifndef HANDLE_MP_FIX_BITS_MASK
+#define HANDLE_MP_FIX_BITS_MASK(ID, NAME)
+#endif
+
+#ifndef HANDLE_MP_FIX_MAX
+#define HANDLE_MP_FIX_MAX(ID, NAME)
+#endif
+
+#ifndef HANDLE_MP_FIX_LEN
+#define HANDLE_MP_FIX_LEN(ID, NAME)
+#endif
+
+#ifndef HANDLE_MP_FIX_MIN
+#define HANDLE_MP_FIX_MIN(ID, NAME)
+#endif
+
+HANDLE_MP_FIRST_BYTE(0xc0, Nil)
+HANDLE_MP_FIRST_BYTE(0xc2, False)
+HANDLE_MP_FIRST_BYTE(0xc3, True)
+HANDLE_MP_FIRST_BYTE(0xc4, Bin8)
+HANDLE_MP_FIRST_BYTE(0xc5, Bin16)
+HANDLE_MP_FIRST_BYTE(0xc6, Bin32)
+HANDLE_MP_FIRST_BYTE(0xc7, Ext8)
+HANDLE_MP_FIRST_BYTE(0xc8, Ext16)
+HANDLE_MP_FIRST_BYTE(0xc9, Ext32)
+HANDLE_MP_FIRST_BYTE(0xca, Float32)
+HANDLE_MP_FIRST_BYTE(0xcb, Float64)
+HANDLE_MP_FIRST_BYTE(0xcc, UInt8)
+HANDLE_MP_FIRST_BYTE(0xcd, UInt16)
+HANDLE_MP_FIRST_BYTE(0xce, UInt32)
+HANDLE_MP_FIRST_BYTE(0xcf, UInt64)
+HANDLE_MP_FIRST_BYTE(0xd0, Int8)
+HANDLE_MP_FIRST_BYTE(0xd1, Int16)
+HANDLE_MP_FIRST_BYTE(0xd2, Int32)
+HANDLE_MP_FIRST_BYTE(0xd3, Int64)
+HANDLE_MP_FIRST_BYTE(0xd4, FixExt1)
+HANDLE_MP_FIRST_BYTE(0xd5, FixExt2)
+HANDLE_MP_FIRST_BYTE(0xd6, FixExt4)
+HANDLE_MP_FIRST_BYTE(0xd7, FixExt8)
+HANDLE_MP_FIRST_BYTE(0xd8, FixExt16)
+HANDLE_MP_FIRST_BYTE(0xd9, Str8)
+HANDLE_MP_FIRST_BYTE(0xda, Str16)
+HANDLE_MP_FIRST_BYTE(0xdb, Str32)
+HANDLE_MP_FIRST_BYTE(0xdc, Array16)
+HANDLE_MP_FIRST_BYTE(0xdd, Array32)
+HANDLE_MP_FIRST_BYTE(0xde, Map16)
+HANDLE_MP_FIRST_BYTE(0xdf, Map32)
+
+HANDLE_MP_FIX_BITS(0x00, PositiveInt)
+HANDLE_MP_FIX_BITS(0x80, Map)
+HANDLE_MP_FIX_BITS(0x90, Array)
+HANDLE_MP_FIX_BITS(0xa0, String)
+HANDLE_MP_FIX_BITS(0xe0, NegativeInt)
+
+HANDLE_MP_FIX_BITS_MASK(0x80, PositiveInt)
+HANDLE_MP_FIX_BITS_MASK(0xf0, Map)
+HANDLE_MP_FIX_BITS_MASK(0xf0, Array)
+HANDLE_MP_FIX_BITS_MASK(0xe0, String)
+HANDLE_MP_FIX_BITS_MASK(0xe0, NegativeInt)
+
+HANDLE_MP_FIX_MAX(0x7f, PositiveInt)
+HANDLE_MP_FIX_MAX(0x0f, Map)
+HANDLE_MP_FIX_MAX(0x0f, Array)
+HANDLE_MP_FIX_MAX(0x1f, String)
+
+HANDLE_MP_FIX_LEN(0x01, Ext1)
+HANDLE_MP_FIX_LEN(0x02, Ext2)
+HANDLE_MP_FIX_LEN(0x04, Ext4)
+HANDLE_MP_FIX_LEN(0x08, Ext8)
+HANDLE_MP_FIX_LEN(0x10, Ext16)
+
+HANDLE_MP_FIX_MIN(-0x20, NegativeInt)
+
+#undef HANDLE_MP_FIRST_BYTE
+#undef HANDLE_MP_FIX_BITS
+#undef HANDLE_MP_FIX_BITS_MASK
+#undef HANDLE_MP_FIX_MAX
+#undef HANDLE_MP_FIX_LEN
+#undef HANDLE_MP_FIX_MIN
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MsgPack.h b/contrib/llvm/include/llvm/BinaryFormat/MsgPack.h
new file mode 100644
index 000000000000..d431912a53e5
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/MsgPack.h
@@ -0,0 +1,93 @@
+//===-- MsgPack.h - MessagePack Constants -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains constants used for implementing MessagePack support.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BINARYFORMAT_MSGPACK_H
+#define LLVM_BINARYFORMAT_MSGPACK_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace msgpack {
+
+/// The endianness of all multi-byte encoded values in MessagePack.
+constexpr support::endianness Endianness = support::big;
+
+/// The first byte identifiers of MessagePack object formats.
+namespace FirstByte {
+#define HANDLE_MP_FIRST_BYTE(ID, NAME) constexpr uint8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+/// Most significant bits used to identify "Fix" variants in MessagePack.
+///
+/// For example, FixStr objects encode their size in the five least significant
+/// bits of their first byte, which is identified by the bit pattern "101" in
+/// the three most significant bits. So FixBits::String contains 0b10100000.
+///
+/// A corresponding mask of the bit pattern is found in \c FixBitsMask.
+namespace FixBits {
+#define HANDLE_MP_FIX_BITS(ID, NAME) constexpr uint8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+/// Mask of bits used to identify "Fix" variants in MessagePack.
+///
+/// For example, FixStr objects encode their size in the five least significant
+/// bits of their first byte, which is identified by the bit pattern "101" in
+/// the three most significant bits. So FixBitsMask::String contains
+/// 0b11100000.
+///
+/// The corresponding bit pattern to mask for is found in FixBits.
+namespace FixBitsMask {
+#define HANDLE_MP_FIX_BITS_MASK(ID, NAME) constexpr uint8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+/// The maximum value or size encodable in "Fix" variants of formats.
+///
+/// For example, FixStr objects encode their size in the five least significant
+/// bits of their first byte, so the largest encodable size is 0b00011111.
+namespace FixMax {
+#define HANDLE_MP_FIX_MAX(ID, NAME) constexpr uint8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+/// The exact size encodable in "Fix" variants of formats.
+///
+/// The only objects for which an exact size makes sense are of Extension type.
+///
+/// For example, FixExt4 stores an extension type containing exactly four bytes.
+namespace FixLen {
+#define HANDLE_MP_FIX_LEN(ID, NAME) constexpr uint8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+/// The minimum value or size encodable in "Fix" variants of formats.
+///
+/// The only object for which a minimum makes sense is a negative FixNum.
+///
+/// Negative FixNum objects encode their signed integer value in one byte, but
+/// they must have the pattern "111" as their three most significant bits. This
+/// means all values are negative, and the smallest representable value is
+/// 0b11100000.
+namespace FixMin {
+#define HANDLE_MP_FIX_MIN(ID, NAME) constexpr int8_t NAME = ID;
+#include "llvm/BinaryFormat/MsgPack.def"
+}
+
+} // end namespace msgpack
+} // end namespace llvm
+
+#endif // LLVM_BINARYFORMAT_MSGPACK_H
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MsgPackReader.h b/contrib/llvm/include/llvm/BinaryFormat/MsgPackReader.h
new file mode 100644
index 000000000000..511c31407455
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/MsgPackReader.h
@@ -0,0 +1,148 @@
+//===- MsgPackReader.h - Simple MsgPack reader ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is a MessagePack reader.
+///
+/// See https://github.com/msgpack/msgpack/blob/master/spec.md for the full
+/// standard.
+///
+/// Typical usage:
+/// \code
+/// StringRef input = GetInput();
+/// msgpack::Reader MPReader(input);
+/// msgpack::Object Obj;
+///
+/// while (MPReader.read(Obj)) {
+/// switch (Obj.Kind) {
+/// case msgpack::Type::Int:
+///       // Use Obj.Int
+/// break;
+/// // ...
+/// }
+/// }
+/// \endcode
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MSGPACKREADER_H
+#define LLVM_SUPPORT_MSGPACKREADER_H
+
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
+
+namespace llvm {
+namespace msgpack {
+
+/// MessagePack types as defined in the standard, with the exception of Integer
+/// being divided into a signed Int and unsigned UInt variant in order to map
+/// directly to C++ types.
+///
+/// The types map onto corresponding union members of the \c Object struct.
+enum class Type : uint8_t {
+ Int,
+ UInt,
+ Nil,
+ Boolean,
+ Float,
+ String,
+ Binary,
+ Array,
+ Map,
+ Extension,
+};
+
+/// Extension types are composed of a user-defined type ID and an uninterpreted
+/// sequence of bytes.
+struct ExtensionType {
+ /// User-defined extension type.
+ int8_t Type;
+ /// Raw bytes of the extension object.
+ StringRef Bytes;
+};
+
+/// MessagePack object, represented as a tagged union of C++ types.
+///
+/// All types except \c Type::Nil (which has only one value, and so is
+/// completely represented by the \c Kind itself) map to exactly one union
+/// member.
+struct Object {
+ Type Kind;
+ union {
+ /// Value for \c Type::Int.
+ int64_t Int;
+ /// Value for \c Type::UInt.
+ uint64_t UInt;
+ /// Value for \c Type::Boolean.
+ bool Bool;
+ /// Value for \c Type::Float.
+ double Float;
+ /// Value for \c Type::String and \c Type::Binary.
+ StringRef Raw;
+ /// Value for \c Type::Array and \c Type::Map.
+ size_t Length;
+ /// Value for \c Type::Extension.
+ ExtensionType Extension;
+ };
+
+ Object() : Kind(Type::Int), Int(0) {}
+};
+
+/// Reads MessagePack objects from memory, one at a time.
+class Reader {
+public:
+ /// Construct a reader, keeping a reference to the \p InputBuffer.
+ Reader(MemoryBufferRef InputBuffer);
+ /// Construct a reader, keeping a reference to the \p Input.
+ Reader(StringRef Input);
+
+ Reader(const Reader &) = delete;
+ Reader &operator=(const Reader &) = delete;
+
+ /// Read one object from the input buffer, advancing past it.
+ ///
+ /// The \p Obj is updated with the kind of the object read, and the
+ /// corresponding union member is updated.
+ ///
+ /// For the collection objects (Array and Map), only the length is read, and
+ /// the caller must make an additional \c N calls (in the case of Array) or
+ /// \c N*2 calls (in the case of Map) to \c Read to retrieve the collection
+ /// elements.
+ ///
+ /// \param [out] Obj filled with next object on success.
+ ///
+ /// \returns true when object successfully read, false when at end of
+ /// input (and so \p Obj was not updated), otherwise an error.
+ Expected<bool> read(Object &Obj);
+
+private:
+ MemoryBufferRef InputBuffer;
+ StringRef::iterator Current;
+ StringRef::iterator End;
+
+ size_t remainingSpace() {
+ // The rest of the code maintains the invariant that End >= Current, so
+ // that this cast is always defined behavior.
+ return static_cast<size_t>(End - Current);
+ }
+
+ template <class T> Expected<bool> readRaw(Object &Obj);
+ template <class T> Expected<bool> readInt(Object &Obj);
+ template <class T> Expected<bool> readUInt(Object &Obj);
+ template <class T> Expected<bool> readLength(Object &Obj);
+ template <class T> Expected<bool> readExt(Object &Obj);
+ Expected<bool> createRaw(Object &Obj, uint32_t Size);
+ Expected<bool> createExt(Object &Obj, uint32_t Size);
+};
+
+} // end namespace msgpack
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MSGPACKREADER_H
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MsgPackTypes.h b/contrib/llvm/include/llvm/BinaryFormat/MsgPackTypes.h
new file mode 100644
index 000000000000..f96cd4c338fd
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/MsgPackTypes.h
@@ -0,0 +1,372 @@
+//===- MsgPackTypes.h - MsgPack Types ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This is a data structure for representing MessagePack "documents", with
+/// methods to go to and from MessagePack. The types also specialize YAMLIO
+/// traits in order to go to and from YAML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/BinaryFormat/MsgPackReader.h"
+#include "llvm/BinaryFormat/MsgPackWriter.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <vector>
+
+#ifndef LLVM_BINARYFORMAT_MSGPACKTYPES_H
+#define LLVM_BINARYFORMAT_MSGPACKTYPES_H
+
+namespace llvm {
+namespace msgpack {
+
+class Node;
+
+/// Short-hand for a Node pointer.
+using NodePtr = std::shared_ptr<Node>;
+
+/// Short-hand for an Optional Node pointer.
+using OptNodePtr = Optional<NodePtr>;
+
+/// Abstract base-class which can be any MessagePack type.
+class Node {
+public:
+ enum NodeKind {
+ NK_Scalar,
+ NK_Array,
+ NK_Map,
+ };
+
+private:
+ virtual void anchor() = 0;
+ const NodeKind Kind;
+
+ static Expected<OptNodePtr> readArray(Reader &MPReader, size_t Length);
+ static Expected<OptNodePtr> readMap(Reader &MPReader, size_t Length);
+
+public:
+ NodeKind getKind() const { return Kind; }
+
+ /// Construct a Node. Used by derived classes to track kind information.
+ Node(NodeKind Kind) : Kind(Kind) {}
+
+ virtual ~Node() = default;
+
+ /// Read from a MessagePack reader \p MPReader, returning an error if one is
+ /// encountered, or None if \p MPReader is at the end of stream, or some Node
+ /// pointer if some type is read.
+ static Expected<OptNodePtr> read(Reader &MPReader);
+
+ /// Write to a MessagePack writer \p MPWriter.
+ virtual void write(Writer &MPWriter) = 0;
+};
+
+/// A MessagePack scalar.
+class ScalarNode : public Node {
+public:
+ enum ScalarKind {
+ SK_Int,
+ SK_UInt,
+ SK_Nil,
+ SK_Boolean,
+ SK_Float,
+ SK_String,
+ SK_Binary,
+ };
+
+private:
+ void anchor() override;
+
+ void destroy();
+
+ ScalarKind SKind;
+
+ union {
+ int64_t IntValue;
+ uint64_t UIntValue;
+ bool BoolValue;
+ double FloatValue;
+ std::string StringValue;
+ };
+
+public:
+ /// Construct an Int ScalarNode.
+ ScalarNode(int64_t IntValue);
+ /// Construct an Int ScalarNode.
+ ScalarNode(int32_t IntValue);
+ /// Construct an UInt ScalarNode.
+ ScalarNode(uint64_t UIntValue);
+ /// Construct an UInt ScalarNode.
+ ScalarNode(uint32_t UIntValue);
+ /// Construct a Nil ScalarNode.
+ ScalarNode();
+ /// Construct a Boolean ScalarNode.
+ ScalarNode(bool BoolValue);
+ /// Construct a Float ScalarNode.
+ ScalarNode(double FloatValue);
+ /// Construct a String ScalarNode.
+ ScalarNode(StringRef StringValue);
+ /// Construct a String ScalarNode.
+ ScalarNode(const char *StringValue);
+ /// Construct a String ScalarNode.
+ ScalarNode(std::string &&StringValue);
+ /// Construct a Binary ScalarNode.
+ ScalarNode(MemoryBufferRef BinaryValue);
+
+ ~ScalarNode();
+
+ ScalarNode &operator=(const ScalarNode &RHS) = delete;
+ /// A ScalarNode can only be move assigned.
+ ScalarNode &operator=(ScalarNode &&RHS);
+
+ /// Change the kind of this ScalarNode, zero initializing it to the new type.
+ void setScalarKind(ScalarKind SKind) {
+ switch (SKind) {
+ case SK_Int:
+ *this = int64_t(0);
+ break;
+ case SK_UInt:
+ *this = uint64_t(0);
+ break;
+ case SK_Boolean:
+ *this = false;
+ break;
+ case SK_Float:
+ *this = 0.0;
+ break;
+ case SK_String:
+ *this = StringRef();
+ break;
+ case SK_Binary:
+ *this = MemoryBufferRef("", "");
+ break;
+ case SK_Nil:
+ *this = ScalarNode();
+ break;
+ }
+ }
+
+ /// Get the current kind of ScalarNode.
+ ScalarKind getScalarKind() { return SKind; }
+
+ /// Get the value of an Int scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_Int
+ int64_t getInt() {
+ assert(SKind == SK_Int);
+ return IntValue;
+ }
+
+ /// Get the value of a UInt scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_UInt
+ uint64_t getUInt() {
+ assert(SKind == SK_UInt);
+ return UIntValue;
+ }
+
+ /// Get the value of an Boolean scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_Boolean
+ bool getBool() {
+ assert(SKind == SK_Boolean);
+ return BoolValue;
+ }
+
+ /// Get the value of an Float scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_Float
+ double getFloat() {
+ assert(SKind == SK_Float);
+ return FloatValue;
+ }
+
+ /// Get the value of a String scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_String
+ StringRef getString() {
+ assert(SKind == SK_String);
+ return StringValue;
+ }
+
+ /// Get the value of a Binary scalar.
+ ///
+ /// \warning Assumes getScalarKind() == SK_Binary
+ StringRef getBinary() {
+ assert(SKind == SK_Binary);
+ return StringValue;
+ }
+
+ static bool classof(const Node *N) { return N->getKind() == NK_Scalar; }
+
+ void write(Writer &MPWriter) override;
+
+ /// Parse a YAML scalar of the current ScalarKind from \p ScalarStr.
+ ///
+ /// \returns An empty string on success, otherwise an error message.
+ StringRef inputYAML(StringRef ScalarStr);
+
+ /// Output a YAML scalar of the current ScalarKind into \p OS.
+ void outputYAML(raw_ostream &OS) const;
+
+ /// Determine which YAML quoting type the current value would need when
+ /// output.
+ yaml::QuotingType mustQuoteYAML(StringRef ScalarStr) const;
+
+ /// Get the YAML tag for the current ScalarKind.
+ StringRef getYAMLTag() const;
+
+ /// Flag which affects how the type handles YAML tags when reading and
+ /// writing.
+ ///
+ /// When false, tags are used when reading and writing. When reading, the tag
+ /// is used to decide the ScalarKind before parsing. When writing, the tag is
+ /// output along with the value.
+ ///
+ /// When true, tags are ignored when reading and writing. When reading, the
+ /// ScalarKind is always assumed to be String. When writing, the tag is not
+ /// output.
+ bool IgnoreTag = false;
+
+ static const char *IntTag;
+ static const char *NilTag;
+ static const char *BooleanTag;
+ static const char *FloatTag;
+ static const char *StringTag;
+ static const char *BinaryTag;
+};
+
+class ArrayNode : public Node, public std::vector<NodePtr> {
+ void anchor() override;
+
+public:
+ ArrayNode() : Node(NK_Array) {}
+ static bool classof(const Node *N) { return N->getKind() == NK_Array; }
+
+ void write(Writer &MPWriter) override {
+ MPWriter.writeArraySize(this->size());
+ for (auto &N : *this)
+ N->write(MPWriter);
+ }
+};
+
+class MapNode : public Node, public StringMap<NodePtr> {
+ void anchor() override;
+
+public:
+ MapNode() : Node(NK_Map) {}
+ static bool classof(const Node *N) { return N->getKind() == NK_Map; }
+
+ void write(Writer &MPWriter) override {
+ MPWriter.writeMapSize(this->size());
+ for (auto &N : *this) {
+ MPWriter.write(N.first());
+ N.second->write(MPWriter);
+ }
+ }
+};
+
+} // end namespace msgpack
+
+namespace yaml {
+
+template <> struct PolymorphicTraits<msgpack::NodePtr> {
+ static NodeKind getKind(const msgpack::NodePtr &N) {
+ if (isa<msgpack::ScalarNode>(*N))
+ return NodeKind::Scalar;
+ if (isa<msgpack::MapNode>(*N))
+ return NodeKind::Map;
+ if (isa<msgpack::ArrayNode>(*N))
+ return NodeKind::Sequence;
+ llvm_unreachable("NodeKind not supported");
+ }
+ static msgpack::ScalarNode &getAsScalar(msgpack::NodePtr &N) {
+ if (!N || !isa<msgpack::ScalarNode>(*N))
+ N.reset(new msgpack::ScalarNode());
+ return *cast<msgpack::ScalarNode>(N.get());
+ }
+ static msgpack::MapNode &getAsMap(msgpack::NodePtr &N) {
+ if (!N || !isa<msgpack::MapNode>(*N))
+ N.reset(new msgpack::MapNode());
+ return *cast<msgpack::MapNode>(N.get());
+ }
+ static msgpack::ArrayNode &getAsSequence(msgpack::NodePtr &N) {
+ if (!N || !isa<msgpack::ArrayNode>(*N))
+ N.reset(new msgpack::ArrayNode());
+ return *cast<msgpack::ArrayNode>(N.get());
+ }
+};
+
+template <> struct TaggedScalarTraits<msgpack::ScalarNode> {
+ static void output(const msgpack::ScalarNode &S, void *Ctxt,
+ raw_ostream &ScalarOS, raw_ostream &TagOS) {
+ if (!S.IgnoreTag)
+ TagOS << S.getYAMLTag();
+ S.outputYAML(ScalarOS);
+ }
+
+ static StringRef input(StringRef ScalarStr, StringRef Tag, void *Ctxt,
+ msgpack::ScalarNode &S) {
+ if (Tag == msgpack::ScalarNode::IntTag) {
+ S.setScalarKind(msgpack::ScalarNode::SK_UInt);
+ if (S.inputYAML(ScalarStr) == StringRef())
+ return StringRef();
+ S.setScalarKind(msgpack::ScalarNode::SK_Int);
+ return S.inputYAML(ScalarStr);
+ }
+
+ if (S.IgnoreTag || Tag == msgpack::ScalarNode::StringTag ||
+ Tag == "tag:yaml.org,2002:str")
+ S.setScalarKind(msgpack::ScalarNode::SK_String);
+ else if (Tag == msgpack::ScalarNode::NilTag)
+ S.setScalarKind(msgpack::ScalarNode::SK_Nil);
+ else if (Tag == msgpack::ScalarNode::BooleanTag)
+ S.setScalarKind(msgpack::ScalarNode::SK_Boolean);
+ else if (Tag == msgpack::ScalarNode::FloatTag)
+ S.setScalarKind(msgpack::ScalarNode::SK_Float);
+ else if (Tag == msgpack::ScalarNode::StringTag)
+ S.setScalarKind(msgpack::ScalarNode::SK_String);
+ else if (Tag == msgpack::ScalarNode::BinaryTag)
+ S.setScalarKind(msgpack::ScalarNode::SK_Binary);
+ else
+ return "Unsupported messagepack tag";
+
+ return S.inputYAML(ScalarStr);
+ }
+
+ static QuotingType mustQuote(const msgpack::ScalarNode &S, StringRef Str) {
+ return S.mustQuoteYAML(Str);
+ }
+};
+
+template <> struct CustomMappingTraits<msgpack::MapNode> {
+ static void inputOne(IO &IO, StringRef Key, msgpack::MapNode &M) {
+ IO.mapRequired(Key.str().c_str(), M[Key]);
+ }
+ static void output(IO &IO, msgpack::MapNode &M) {
+ for (auto &N : M)
+ IO.mapRequired(N.getKey().str().c_str(), N.getValue());
+ }
+};
+
+template <> struct SequenceTraits<msgpack::ArrayNode> {
+ static size_t size(IO &IO, msgpack::ArrayNode &A) { return A.size(); }
+ static msgpack::NodePtr &element(IO &IO, msgpack::ArrayNode &A,
+ size_t Index) {
+ if (Index >= A.size())
+ A.resize(Index + 1);
+ return A[Index];
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_BINARYFORMAT_MSGPACKTYPES_H
diff --git a/contrib/llvm/include/llvm/BinaryFormat/MsgPackWriter.h b/contrib/llvm/include/llvm/BinaryFormat/MsgPackWriter.h
new file mode 100644
index 000000000000..98af422c9f19
--- /dev/null
+++ b/contrib/llvm/include/llvm/BinaryFormat/MsgPackWriter.h
@@ -0,0 +1,131 @@
+//===- MsgPackWriter.h - Simple MsgPack writer ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains a MessagePack writer.
+///
+/// See https://github.com/msgpack/msgpack/blob/master/spec.md for the full
+/// specification.
+///
+/// Typical usage:
+/// \code
+/// raw_ostream output = GetOutputStream();
+/// msgpack::Writer MPWriter(output);
+/// MPWriter.writeNil();
+/// MPWriter.write(false);
+/// MPWriter.write("string");
+/// // ...
+/// \endcode
+///
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MSGPACKPARSER_H
+#define LLVM_SUPPORT_MSGPACKPARSER_H
+
+#include "llvm/BinaryFormat/MsgPack.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace msgpack {
+
+/// Writes MessagePack objects to an output stream, one at a time.
+class Writer {
+public:
+ /// Construct a writer, optionally enabling "Compatibility Mode" as defined
+ /// in the MessagePack specification.
+ ///
+ /// When in \p Compatible mode, the writer will write \c Str16 formats
+ /// instead of \c Str8 formats, and will refuse to write any \c Bin formats.
+ ///
+ /// \param OS stream to output MessagePack objects to.
+ /// \param Compatible when set, write in "Compatibility Mode".
+ Writer(raw_ostream &OS, bool Compatible = false);
+
+ Writer(const Writer &) = delete;
+ Writer &operator=(const Writer &) = delete;
+
+ /// Write a \em Nil to the output stream.
+ ///
+ /// The output will be the \em nil format.
+ void writeNil();
+
+ /// Write a \em Boolean to the output stream.
+ ///
+ /// The output will be a \em bool format.
+ void write(bool b);
+
+ /// Write a signed integer to the output stream.
+ ///
+ /// The output will be in the smallest possible \em int format.
+ ///
+ /// The format chosen may be for an unsigned integer.
+ void write(int64_t i);
+
+ /// Write an unsigned integer to the output stream.
+ ///
+ /// The output will be in the smallest possible \em int format.
+ void write(uint64_t u);
+
+ /// Write a floating point number to the output stream.
+ ///
+ /// The output will be in the smallest possible \em float format.
+ void write(double d);
+
+ /// Write a string to the output stream.
+ ///
+ /// The output will be in the smallest possible \em str format.
+ void write(StringRef s);
+
+ /// Write a memory buffer to the output stream.
+ ///
+ /// The output will be in the smallest possible \em bin format.
+ ///
+ /// \warning Do not use this overload if in \c Compatible mode.
+ void write(MemoryBufferRef Buffer);
+
+ /// Write the header for an \em Array of the given size.
+ ///
+ /// The output will be in the smallest possible \em array format.
+ ///
+ /// The header contains an identifier for the \em array format used, as well
+ /// as an encoding of the size of the array.
+ ///
+ /// N.B. The caller must subsequently call \c Write an additional \p Size
+ /// times to complete the array.
+ void writeArraySize(uint32_t Size);
+
+ /// Write the header for a \em Map of the given size.
+ ///
+ /// The output will be in the smallest possible \em map format.
+ ///
+ /// The header contains an identifier for the \em map format used, as well
+ /// as an encoding of the size of the map.
+ ///
+ /// N.B. The caller must subsequently call \c Write an additional \c Size*2
+ /// times to complete the map. Each even numbered call to \c Write defines a
+ /// new key, and each odd numbered call defines the previous key's value.
+ void writeMapSize(uint32_t Size);
+
+ /// Write a typed memory buffer (an extension type) to the output stream.
+ ///
+ /// The output will be in the smallest possible \em ext format.
+ void writeExt(int8_t Type, MemoryBufferRef Buffer);
+
+private:
+ support::endian::Writer EW;
+ bool Compatible;
+};
+
+} // end namespace msgpack
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MSGPACKPARSER_H
diff --git a/contrib/llvm/include/llvm/BinaryFormat/Wasm.h b/contrib/llvm/include/llvm/BinaryFormat/Wasm.h
index fa5448dacec4..d9f0f94b298d 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/Wasm.h
+++ b/contrib/llvm/include/llvm/BinaryFormat/Wasm.h
@@ -16,6 +16,7 @@
#define LLVM_BINARYFORMAT_WASM_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
namespace wasm {
@@ -25,7 +26,7 @@ const char WasmMagic[] = {'\0', 'a', 's', 'm'};
// Wasm binary format version
const uint32_t WasmVersion = 0x1;
// Wasm linking metadata version
-const uint32_t WasmMetadataVersion = 0x1;
+const uint32_t WasmMetadataVersion = 0x2;
// Wasm uses a 64k page size
const uint32_t WasmPageSize = 65536;
@@ -34,9 +35,12 @@ struct WasmObjectHeader {
uint32_t Version;
};
-struct WasmSignature {
- std::vector<uint8_t> ParamTypes;
- uint8_t ReturnType;
+struct WasmDylinkInfo {
+ uint32_t MemorySize; // Memory size in bytes
+ uint32_t MemoryAlignment; // P2 alignment of memory
+ uint32_t TableSize; // Table size in elements
+ uint32_t TableAlignment; // P2 alignment of table
+ std::vector<StringRef> Needed; // Shared library dependencies
};
struct WasmExport {
@@ -79,6 +83,18 @@ struct WasmGlobal {
StringRef SymbolName; // from the "linking" section
};
+struct WasmEventType {
+ // Kind of event. Currently only WASM_EVENT_ATTRIBUTE_EXCEPTION is possible.
+ uint32_t Attribute;
+ uint32_t SigIndex;
+};
+
+struct WasmEvent {
+ uint32_t Index;
+ WasmEventType Type;
+ StringRef SymbolName; // from the "linking" section
+};
+
struct WasmImport {
StringRef Module;
StringRef Field;
@@ -88,6 +104,7 @@ struct WasmImport {
WasmGlobalType Global;
WasmTable Table;
WasmLimits Memory;
+ WasmEventType Event;
};
};
@@ -104,8 +121,8 @@ struct WasmFunction {
uint32_t Size;
uint32_t CodeOffset; // start of Locals and Body
StringRef SymbolName; // from the "linking" section
- StringRef DebugName; // from the "name" section
- uint32_t Comdat; // from the "comdat info" section
+ StringRef DebugName; // from the "name" section
+ uint32_t Comdat; // from the "comdat info" section
};
struct WasmDataSegment {
@@ -171,18 +188,20 @@ struct WasmLinkingData {
};
enum : unsigned {
- WASM_SEC_CUSTOM = 0, // Custom / User-defined section
- WASM_SEC_TYPE = 1, // Function signature declarations
- WASM_SEC_IMPORT = 2, // Import declarations
- WASM_SEC_FUNCTION = 3, // Function declarations
- WASM_SEC_TABLE = 4, // Indirect function table and other tables
- WASM_SEC_MEMORY = 5, // Memory attributes
- WASM_SEC_GLOBAL = 6, // Global declarations
- WASM_SEC_EXPORT = 7, // Exports
- WASM_SEC_START = 8, // Start function declaration
- WASM_SEC_ELEM = 9, // Elements section
- WASM_SEC_CODE = 10, // Function bodies (code)
- WASM_SEC_DATA = 11 // Data segments
+ WASM_SEC_CUSTOM = 0, // Custom / User-defined section
+ WASM_SEC_TYPE = 1, // Function signature declarations
+ WASM_SEC_IMPORT = 2, // Import declarations
+ WASM_SEC_FUNCTION = 3, // Function declarations
+ WASM_SEC_TABLE = 4, // Indirect function table and other tables
+ WASM_SEC_MEMORY = 5, // Memory attributes
+ WASM_SEC_GLOBAL = 6, // Global declarations
+ WASM_SEC_EXPORT = 7, // Exports
+ WASM_SEC_START = 8, // Start function declaration
+ WASM_SEC_ELEM = 9, // Elements section
+ WASM_SEC_CODE = 10, // Function bodies (code)
+ WASM_SEC_DATA = 11, // Data segments
+ WASM_SEC_DATACOUNT = 12, // Data segment count
+ WASM_SEC_EVENT = 13 // Event declarations
};
// Type immediate encodings used in various contexts.
@@ -191,7 +210,8 @@ enum : unsigned {
WASM_TYPE_I64 = 0x7E,
WASM_TYPE_F32 = 0x7D,
WASM_TYPE_F64 = 0x7C,
- WASM_TYPE_ANYFUNC = 0x70,
+ WASM_TYPE_V128 = 0x7B,
+ WASM_TYPE_FUNCREF = 0x70,
WASM_TYPE_EXCEPT_REF = 0x68,
WASM_TYPE_FUNC = 0x60,
WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
@@ -203,12 +223,13 @@ enum : unsigned {
WASM_EXTERNAL_TABLE = 0x1,
WASM_EXTERNAL_MEMORY = 0x2,
WASM_EXTERNAL_GLOBAL = 0x3,
+ WASM_EXTERNAL_EVENT = 0x4,
};
// Opcodes used in initializer expressions.
enum : unsigned {
WASM_OPCODE_END = 0x0b,
- WASM_OPCODE_GET_GLOBAL = 0x23,
+ WASM_OPCODE_GLOBAL_GET = 0x23,
WASM_OPCODE_I32_CONST = 0x41,
WASM_OPCODE_I64_CONST = 0x42,
WASM_OPCODE_F32_CONST = 0x43,
@@ -217,35 +238,27 @@ enum : unsigned {
enum : unsigned {
WASM_LIMITS_FLAG_HAS_MAX = 0x1,
-};
-
-// Subset of types that a value can have
-enum class ValType {
- I32 = WASM_TYPE_I32,
- I64 = WASM_TYPE_I64,
- F32 = WASM_TYPE_F32,
- F64 = WASM_TYPE_F64,
- EXCEPT_REF = WASM_TYPE_EXCEPT_REF,
+ WASM_LIMITS_FLAG_IS_SHARED = 0x2,
};
// Kind codes used in the custom "name" section
enum : unsigned {
WASM_NAMES_FUNCTION = 0x1,
- WASM_NAMES_LOCAL = 0x2,
+ WASM_NAMES_LOCAL = 0x2,
};
// Kind codes used in the custom "linking" section
enum : unsigned {
- WASM_SEGMENT_INFO = 0x5,
- WASM_INIT_FUNCS = 0x6,
- WASM_COMDAT_INFO = 0x7,
- WASM_SYMBOL_TABLE = 0x8,
+ WASM_SEGMENT_INFO = 0x5,
+ WASM_INIT_FUNCS = 0x6,
+ WASM_COMDAT_INFO = 0x7,
+ WASM_SYMBOL_TABLE = 0x8,
};
// Kind codes used in the custom "linking" section in the WASM_COMDAT_INFO
enum : unsigned {
- WASM_COMDAT_DATA = 0x0,
- WASM_COMDAT_FUNCTION = 0x1,
+ WASM_COMDAT_DATA = 0x0,
+ WASM_COMDAT_FUNCTION = 0x1,
};
// Kind codes used in the custom "linking" section in the WASM_SYMBOL_TABLE
@@ -254,17 +267,23 @@ enum WasmSymbolType : unsigned {
WASM_SYMBOL_TYPE_DATA = 0x1,
WASM_SYMBOL_TYPE_GLOBAL = 0x2,
WASM_SYMBOL_TYPE_SECTION = 0x3,
+ WASM_SYMBOL_TYPE_EVENT = 0x4,
+};
+
+// Kinds of event attributes.
+enum WasmEventAttribute : unsigned {
+ WASM_EVENT_ATTRIBUTE_EXCEPTION = 0x0,
};
-const unsigned WASM_SYMBOL_BINDING_MASK = 0x3;
-const unsigned WASM_SYMBOL_VISIBILITY_MASK = 0xc;
+const unsigned WASM_SYMBOL_BINDING_MASK = 0x3;
+const unsigned WASM_SYMBOL_VISIBILITY_MASK = 0xc;
-const unsigned WASM_SYMBOL_BINDING_GLOBAL = 0x0;
-const unsigned WASM_SYMBOL_BINDING_WEAK = 0x1;
-const unsigned WASM_SYMBOL_BINDING_LOCAL = 0x2;
+const unsigned WASM_SYMBOL_BINDING_GLOBAL = 0x0;
+const unsigned WASM_SYMBOL_BINDING_WEAK = 0x1;
+const unsigned WASM_SYMBOL_BINDING_LOCAL = 0x2;
const unsigned WASM_SYMBOL_VISIBILITY_DEFAULT = 0x0;
-const unsigned WASM_SYMBOL_VISIBILITY_HIDDEN = 0x4;
-const unsigned WASM_SYMBOL_UNDEFINED = 0x10;
+const unsigned WASM_SYMBOL_VISIBILITY_HIDDEN = 0x4;
+const unsigned WASM_SYMBOL_UNDEFINED = 0x10;
#define WASM_RELOC(name, value) name = value,
@@ -274,9 +293,32 @@ enum : unsigned {
#undef WASM_RELOC
+// Subset of types that a value can have
+enum class ValType {
+ I32 = WASM_TYPE_I32,
+ I64 = WASM_TYPE_I64,
+ F32 = WASM_TYPE_F32,
+ F64 = WASM_TYPE_F64,
+ V128 = WASM_TYPE_V128,
+ EXCEPT_REF = WASM_TYPE_EXCEPT_REF,
+};
+
+struct WasmSignature {
+ SmallVector<wasm::ValType, 1> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+ // Support empty and tombstone instances, needed by DenseMap.
+ enum { Plain, Empty, Tombstone } State = Plain;
+
+ WasmSignature(SmallVector<wasm::ValType, 1> &&InReturns,
+ SmallVector<wasm::ValType, 4> &&InParams)
+ : Returns(InReturns), Params(InParams) {}
+ WasmSignature() = default;
+};
+
// Useful comparison operators
inline bool operator==(const WasmSignature &LHS, const WasmSignature &RHS) {
- return LHS.ReturnType == RHS.ReturnType && LHS.ParamTypes == RHS.ParamTypes;
+ return LHS.State == RHS.State && LHS.Returns == RHS.Returns &&
+ LHS.Params == RHS.Params;
}
inline bool operator!=(const WasmSignature &LHS, const WasmSignature &RHS) {
diff --git a/contrib/llvm/include/llvm/BinaryFormat/WasmRelocs.def b/contrib/llvm/include/llvm/BinaryFormat/WasmRelocs.def
index 8ffd51e483f3..b3a08e70c1d5 100644
--- a/contrib/llvm/include/llvm/BinaryFormat/WasmRelocs.def
+++ b/contrib/llvm/include/llvm/BinaryFormat/WasmRelocs.def
@@ -1,4 +1,3 @@
-
#ifndef WASM_RELOC
#error "WASM_RELOC must be defined"
#endif
@@ -13,3 +12,4 @@ WASM_RELOC(R_WEBASSEMBLY_TYPE_INDEX_LEB, 6)
WASM_RELOC(R_WEBASSEMBLY_GLOBAL_INDEX_LEB, 7)
WASM_RELOC(R_WEBASSEMBLY_FUNCTION_OFFSET_I32, 8)
WASM_RELOC(R_WEBASSEMBLY_SECTION_OFFSET_I32, 9)
+WASM_RELOC(R_WEBASSEMBLY_EVENT_INDEX_LEB, 10)
diff --git a/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h b/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
index ce8bdd9cf0b4..0d7cc141f2ce 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
@@ -51,6 +51,7 @@ class Module;
struct BitcodeLTOInfo {
bool IsThinLTO;
bool HasSummary;
+ bool EnableSplitLTOUnit;
};
/// Represents a module in a bitcode file.
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 6723cf42dd2c..f0d11e9c1689 100644
--- a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -342,6 +342,7 @@ enum ConstantsCodes {
CST_CODE_INLINEASM = 23, // INLINEASM: [sideeffect|alignstack|
// asmdialect,asmstr,conststr]
CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, // [opty, flags, n x operands]
+ CST_CODE_CE_UNOP = 25, // CE_UNOP: [opcode, opval]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
@@ -364,6 +365,14 @@ enum CastOpcodes {
CAST_ADDRSPACECAST = 12
};
+/// UnaryOpcodes - These are values used in the bitcode files to encode which
+/// unop a CST_CODE_CE_UNOP or a XXX refers to. The values of these enums
+/// have no fixed relation to the LLVM IR enum values. Changing these will
+/// break compatibility with old files.
+enum UnaryOpcodes {
+ UNOP_NEG = 0
+};
+
/// BinaryOpcodes - These are values used in the bitcode files to encode which
/// binop a CST_CODE_CE_BINOP or a XXX refers to. The values of these enums
/// have no fixed relation to the LLVM IR enum values. Changing these will
@@ -524,6 +533,7 @@ enum FunctionCodes {
// 53 is unused.
// 54 is unused.
FUNC_CODE_OPERAND_BUNDLE = 55, // OPERAND_BUNDLE: [tag#, value...]
+ FUNC_CODE_INST_UNOP = 56, // UNOP: [opcode, ty, opval]
};
enum UseListCodes {
@@ -591,6 +601,7 @@ enum AttributeKindCodes {
ATTR_KIND_NOCF_CHECK = 56,
ATTR_KIND_OPT_FOR_FUZZING = 57,
ATTR_KIND_SHADOWCALLSTACK = 58,
+ ATTR_KIND_SPECULATIVE_LOAD_HARDENING = 59,
};
enum ComdatSelectionKindCodes {
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index b6056380916c..413901d218f9 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -71,6 +71,7 @@ class MCTargetOptions;
class MDNode;
class Module;
class raw_ostream;
+class StackMaps;
class TargetLoweringObjectFile;
class TargetMachine;
@@ -137,6 +138,9 @@ private:
static char ID;
+protected:
+ /// Protected struct HandlerInfo and Handlers permit target extended
+ /// AsmPrinter adds their own handlers.
struct HandlerInfo {
AsmPrinterHandler *Handler;
const char *TimerName;
@@ -365,6 +369,9 @@ public:
/// emit the proxies we previously omitted in EmitGlobalVariable.
void emitGlobalGOTEquivs();
+ /// Emit the stack maps.
+ void emitStackMaps(StackMaps &SM);
+
//===------------------------------------------------------------------===//
// Overridable Hooks
//===------------------------------------------------------------------===//
@@ -542,7 +549,7 @@ public:
///
/// \p Value - The value to emit.
/// \p Size - The size of the integer (in bytes) to emit.
- virtual void EmitDebugThreadLocal(const MCExpr *Value, unsigned Size) const;
+ virtual void EmitDebugValue(const MCExpr *Value, unsigned Size) const;
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
@@ -631,6 +638,11 @@ private:
/// inline asm.
void EmitInlineAsm(const MachineInstr *MI) const;
+ /// Add inline assembly info to the diagnostics machinery, so we can
+ /// emit file and position info. Returns SrcMgr memory buffer position.
+ unsigned addInlineAsmDiagBuffer(StringRef AsmStr,
+ const MDNode *LocMDNode) const;
+
//===------------------------------------------------------------------===//
// Internal Implementation Details
//===------------------------------------------------------------------===//
@@ -647,6 +659,8 @@ private:
void EmitLLVMUsedList(const ConstantArray *InitList);
/// Emit llvm.ident metadata in an '.ident' directive.
void EmitModuleIdents(Module &M);
+ /// Emit bytes for llvm.commandline metadata.
+ void EmitModuleCommandLines(Module &M);
void EmitXXStructorList(const DataLayout &DL, const Constant *List,
bool isCtor);
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinterHandler.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinterHandler.h
new file mode 100644
index 000000000000..a8b13200dd4e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinterHandler.h
@@ -0,0 +1,74 @@
+//===-- llvm/CodeGen/AsmPrinterHandler.h -----------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a generic interface for AsmPrinter handlers,
+// like debug and EH info emitters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTERHANDLER_H
+#define LLVM_CODEGEN_ASMPRINTERHANDLER_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MCSymbol;
+
+typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm);
+
+/// Collects and handles AsmPrinter objects required to build debug
+/// or EH information.
+class AsmPrinterHandler {
+public:
+ virtual ~AsmPrinterHandler();
+
+ /// For symbols that have a size designated (e.g. common symbols),
+ /// this tracks that size.
+ virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
+
+ /// Emit all sections that should come after the content.
+ virtual void endModule() = 0;
+
+ /// Gather pre-function debug information.
+ /// Every beginFunction(MF) call should be followed by an endFunction(MF)
+ /// call.
+ virtual void beginFunction(const MachineFunction *MF) = 0;
+
+ // Emit any of function marker (like .cfi_endproc). This is called
+ // before endFunction and cannot switch sections.
+ virtual void markFunctionEnd();
+
+ /// Gather post-function debug information.
+ /// Please note that some AsmPrinter implementations may not call
+ /// beginFunction at all.
+ virtual void endFunction(const MachineFunction *MF) = 0;
+
+ virtual void beginFragment(const MachineBasicBlock *MBB,
+ ExceptionSymbolProvider ESP) {}
+ virtual void endFragment() {}
+
+ /// Emit target-specific EH funclet machinery.
+ virtual void beginFunclet(const MachineBasicBlock &MBB,
+ MCSymbol *Sym = nullptr) {}
+ virtual void endFunclet() {}
+
+ /// Process beginning of an instruction.
+ virtual void beginInstruction(const MachineInstr *MI) = 0;
+
+ /// Process end of an instruction.
+ virtual void endInstruction() = 0;
+};
+} // End of namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index f76a2426377a..f105d887c397 100644
--- a/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -80,6 +80,23 @@ private:
using BaseT = TargetTransformInfoImplCRTPBase<T>;
using TTI = TargetTransformInfo;
+ /// Estimate a cost of Broadcast as an extract and sequence of insert
+ /// operations.
+ unsigned getBroadcastShuffleOverhead(Type *Ty) {
+ assert(Ty->isVectorTy() && "Can only shuffle vectors");
+ unsigned Cost = 0;
+ // Broadcast cost is equal to the cost of extracting the zero'th element
+ // plus the cost of inserting it into every element of the result vector.
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, Ty, 0);
+
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::InsertElement, Ty, i);
+ }
+ return Cost;
+ }
+
/// Estimate a cost of shuffle as a sequence of extract and insert
/// operations.
unsigned getPermuteShuffleOverhead(Type *Ty) {
@@ -101,6 +118,50 @@ private:
return Cost;
}
+ /// Estimate a cost of subvector extraction as a sequence of extract and
+ /// insert operations.
+ unsigned getExtractSubvectorOverhead(Type *Ty, int Index, Type *SubTy) {
+ assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() &&
+ "Can only extract subvectors from vectors");
+ int NumSubElts = SubTy->getVectorNumElements();
+ assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() &&
+ "SK_ExtractSubvector index out of range");
+
+ unsigned Cost = 0;
+ // Subvector extraction cost is equal to the cost of extracting element from
+ // the source type plus the cost of inserting them into the result vector
+ // type.
+ for (int i = 0; i != NumSubElts; ++i) {
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, Ty, i + Index);
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::InsertElement, SubTy, i);
+ }
+ return Cost;
+ }
+
+ /// Estimate a cost of subvector insertion as a sequence of extract and
+ /// insert operations.
+ unsigned getInsertSubvectorOverhead(Type *Ty, int Index, Type *SubTy) {
+ assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() &&
+ "Can only insert subvectors into vectors");
+ int NumSubElts = SubTy->getVectorNumElements();
+ assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() &&
+ "SK_InsertSubvector index out of range");
+
+ unsigned Cost = 0;
+ // Subvector insertion cost is equal to the cost of extracting element from
+ // the source type plus the cost of inserting them into the result vector
+ // type.
+ for (int i = 0; i != NumSubElts; ++i) {
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, SubTy, i);
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::InsertElement, Ty, i + Index);
+ }
+ return Cost;
+ }
+
/// Local query method delegates up to T which *must* implement this!
const TargetSubtargetInfo *getST() const {
return static_cast<const T *>(this)->getST();
@@ -554,14 +615,20 @@ public:
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
switch (Kind) {
+ case TTI::SK_Broadcast:
+ return getBroadcastShuffleOverhead(Tp);
case TTI::SK_Select:
+ case TTI::SK_Reverse:
case TTI::SK_Transpose:
case TTI::SK_PermuteSingleSrc:
case TTI::SK_PermuteTwoSrc:
return getPermuteShuffleOverhead(Tp);
- default:
- return 1;
+ case TTI::SK_ExtractSubvector:
+ return getExtractSubvectorOverhead(Tp, Index, SubTp);
+ case TTI::SK_InsertSubvector:
+ return getInsertSubvectorOverhead(Tp, Index, SubTp);
}
+ llvm_unreachable("Unknown TTI::ShuffleKind");
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
@@ -783,8 +850,9 @@ public:
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
- unsigned Alignment,
- unsigned AddressSpace) {
+ unsigned Alignment, unsigned AddressSpace,
+ bool UseMaskForCond = false,
+ bool UseMaskForGaps = false) {
VectorType *VT = dyn_cast<VectorType>(VecTy);
assert(VT && "Expect a vector type for interleaved memory op");
@@ -795,8 +863,13 @@ public:
VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
// Firstly, the cost of load/store operation.
- unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
- Opcode, VecTy, Alignment, AddressSpace);
+ unsigned Cost;
+ if (UseMaskForCond || UseMaskForGaps)
+ Cost = static_cast<T *>(this)->getMaskedMemoryOpCost(
+ Opcode, VecTy, Alignment, AddressSpace);
+ else
+ Cost = static_cast<T *>(this)->getMemoryOpCost(Opcode, VecTy, Alignment,
+ AddressSpace);
// Legalize the vector type, and get the legalized and unlegalized type
// sizes.
@@ -892,6 +965,40 @@ public:
->getVectorInstrCost(Instruction::InsertElement, VT, i);
}
+ if (!UseMaskForCond)
+ return Cost;
+
+ Type *I8Type = Type::getInt8Ty(VT->getContext());
+ VectorType *MaskVT = VectorType::get(I8Type, NumElts);
+ SubVT = VectorType::get(I8Type, NumSubElts);
+
+ // The Mask shuffling cost is extract all the elements of the Mask
+ // and insert each of them Factor times into the wide vector:
+ //
+ // E.g. an interleaved group with factor 3:
+ // %mask = icmp ult <8 x i32> %vec1, %vec2
+ // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
+ // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
+ // The cost is estimated as extract all mask elements from the <8xi1> mask
+ // vector and insert them factor times into the <24xi1> shuffled mask
+ // vector.
+ for (unsigned i = 0; i < NumSubElts; i++)
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, SubVT, i);
+
+ for (unsigned i = 0; i < NumElts; i++)
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::InsertElement, MaskVT, i);
+
+ // The Gaps mask is invariant and created outside the loop, therefore the
+ // cost of creating it is not accounted for here. However if we have both
+ // a MaskForGaps and some other mask that guards the execution of the
+ // memory access, we need to account for the cost of And-ing the two masks
+ // inside the loop.
+ if (UseMaskForGaps)
+ Cost += static_cast<T *>(this)->getArithmeticInstrCost(
+ BinaryOperator::And, MaskVT);
+
return Cost;
}
@@ -901,6 +1008,7 @@ public:
unsigned VF = 1) {
unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
+ auto *ConcreteTTI = static_cast<T *>(this);
switch (IID) {
default: {
@@ -926,29 +1034,24 @@ public:
ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
}
- return static_cast<T *>(this)->
- getIntrinsicInstrCost(IID, RetTy, Types, FMF, ScalarizationCost);
+ return ConcreteTTI->getIntrinsicInstrCost(IID, RetTy, Types, FMF,
+ ScalarizationCost);
}
case Intrinsic::masked_scatter: {
assert(VF == 1 && "Can't vectorize types here.");
Value *Mask = Args[3];
bool VarMask = !isa<Constant>(Mask);
unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
- return
- static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,
- Args[0]->getType(),
- Args[1], VarMask,
- Alignment);
+ return ConcreteTTI->getGatherScatterOpCost(
+ Instruction::Store, Args[0]->getType(), Args[1], VarMask, Alignment);
}
case Intrinsic::masked_gather: {
assert(VF == 1 && "Can't vectorize types here.");
Value *Mask = Args[2];
bool VarMask = !isa<Constant>(Mask);
unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
- return
- static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
- RetTy, Args[0], VarMask,
- Alignment);
+ return ConcreteTTI->getGatherScatterOpCost(Instruction::Load, RetTy,
+ Args[0], VarMask, Alignment);
}
case Intrinsic::experimental_vector_reduce_add:
case Intrinsic::experimental_vector_reduce_mul:
@@ -964,6 +1067,45 @@ public:
case Intrinsic::experimental_vector_reduce_umax:
case Intrinsic::experimental_vector_reduce_umin:
return getIntrinsicInstrCost(IID, RetTy, Args[0]->getType(), FMF);
+ case Intrinsic::fshl:
+ case Intrinsic::fshr: {
+ Value *X = Args[0];
+ Value *Y = Args[1];
+ Value *Z = Args[2];
+ TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
+ TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
+ TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);
+ TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ);
+ TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue;
+ OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
+ : TTI::OP_None;
+ // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
+ // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
+ unsigned Cost = 0;
+ Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Or, RetTy);
+ Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Sub, RetTy);
+ Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
+ OpKindX, OpKindZ, OpPropsX);
+ Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
+ OpKindY, OpKindZ, OpPropsY);
+ // Non-constant shift amounts requires a modulo.
+ if (OpKindZ != TTI::OK_UniformConstantValue &&
+ OpKindZ != TTI::OK_NonUniformConstantValue)
+ Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
+ OpKindZ, OpKindBW, OpPropsZ,
+ OpPropsBW);
+ // For non-rotates (X != Y) we must add shift-by-zero handling costs.
+ if (X != Y) {
+ Type *CondTy = Type::getInt1Ty(RetTy->getContext());
+ if (RetVF > 1)
+ CondTy = VectorType::get(CondTy, RetVF);
+ Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy,
+ CondTy, nullptr);
+ Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
+ CondTy, nullptr);
+ }
+ return Cost;
+ }
}
}
@@ -1036,15 +1178,18 @@ public:
case Intrinsic::fabs:
ISDs.push_back(ISD::FABS);
break;
+ case Intrinsic::canonicalize:
+ ISDs.push_back(ISD::FCANONICALIZE);
+ break;
case Intrinsic::minnum:
ISDs.push_back(ISD::FMINNUM);
if (FMF.noNaNs())
- ISDs.push_back(ISD::FMINNAN);
+ ISDs.push_back(ISD::FMINIMUM);
break;
case Intrinsic::maxnum:
ISDs.push_back(ISD::FMAXNUM);
if (FMF.noNaNs())
- ISDs.push_back(ISD::FMAXNAN);
+ ISDs.push_back(ISD::FMAXIMUM);
break;
case Intrinsic::copysign:
ISDs.push_back(ISD::FCOPYSIGN);
@@ -1136,7 +1281,8 @@ public:
SmallVector<unsigned, 2> CustomCost;
for (unsigned ISD : ISDs) {
if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
- if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
+ if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
+ TLI->isFAbsFree(LT.second)) {
return 0;
}
@@ -1280,24 +1426,36 @@ public:
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
NumVecElts /= 2;
+ Type *SubTy = VectorType::get(ScalarTy, NumVecElts);
// Assume the pairwise shuffles add a cost.
ShuffleCost += (IsPairwise + 1) *
ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
- NumVecElts, Ty);
- ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
- Ty = VectorType::get(ScalarTy, NumVecElts);
+ NumVecElts, SubTy);
+ ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, SubTy);
+ Ty = SubTy;
++LongVectorCount;
}
+
+ NumReduxLevels -= LongVectorCount;
+
// The minimal length of the vector is limited by the real length of vector
// operations performed on the current platform. That's why several final
// reduction operations are performed on the vectors with the same
// architecture-dependent length.
- ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
- ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
- NumVecElts, Ty);
- ArithCost += (NumReduxLevels - LongVectorCount) *
+
+ // Non pairwise reductions need one shuffle per reduction level. Pairwise
+ // reductions need two shuffles on every level, but the last one. On that
+ // level one of the shuffles is <0, u, u, ...> which is identity.
+ unsigned NumShuffles = NumReduxLevels;
+ if (IsPairwise && NumReduxLevels >= 1)
+ NumShuffles += NumReduxLevels - 1;
+ ShuffleCost += NumShuffles *
+ ConcreteTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
+ 0, Ty);
+ ArithCost += NumReduxLevels *
ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
- return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
+ return ShuffleCost + ArithCost +
+ ConcreteTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}
/// Try to calculate op costs for min/max reduction operations.
@@ -1327,37 +1485,46 @@ public:
LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
while (NumVecElts > MVTLen) {
NumVecElts /= 2;
+ Type *SubTy = VectorType::get(ScalarTy, NumVecElts);
+ CondTy = VectorType::get(ScalarCondTy, NumVecElts);
+
// Assume the pairwise shuffles add a cost.
ShuffleCost += (IsPairwise + 1) *
ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
- NumVecElts, Ty);
+ NumVecElts, SubTy);
MinMaxCost +=
- ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
- ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
+ ConcreteTTI->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy, nullptr) +
+ ConcreteTTI->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
nullptr);
- Ty = VectorType::get(ScalarTy, NumVecElts);
- CondTy = VectorType::get(ScalarCondTy, NumVecElts);
+ Ty = SubTy;
++LongVectorCount;
}
+
+ NumReduxLevels -= LongVectorCount;
+
// The minimal length of the vector is limited by the real length of vector
// operations performed on the current platform. That's why several final
// reduction opertions are perfomed on the vectors with the same
// architecture-dependent length.
- ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
- ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
- NumVecElts, Ty);
+
+ // Non pairwise reductions need one shuffle per reduction level. Pairwise
+ // reductions need two shuffles on every level, but the last one. On that
+ // level one of the shuffles is <0, u, u, ...> which is identity.
+ unsigned NumShuffles = NumReduxLevels;
+ if (IsPairwise && NumReduxLevels >= 1)
+ NumShuffles += NumReduxLevels - 1;
+ ShuffleCost += NumShuffles *
+ ConcreteTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
+ 0, Ty);
MinMaxCost +=
- (NumReduxLevels - LongVectorCount) *
+ NumReduxLevels *
(ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
nullptr));
- // Need 3 extractelement instructions for scalarization + an additional
- // scalar select instruction.
+ // The last min/max should be in vector registers and we counted it above.
+ // So just need a single extractelement.
return ShuffleCost + MinMaxCost +
- 3 * getScalarizationOverhead(Ty, /*Insert=*/false,
- /*Extract=*/true) +
- ConcreteTTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
- ScalarCondTy, nullptr);
+ ConcreteTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}
unsigned getVectorSplitCost() { return 1; }
diff --git a/contrib/llvm/include/llvm/CodeGen/GCs.h b/contrib/llvm/include/llvm/CodeGen/BuiltinGCs.h
index 5207f801c84e..1767922fb5ac 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCs.h
+++ b/contrib/llvm/include/llvm/CodeGen/BuiltinGCs.h
@@ -1,4 +1,4 @@
-//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
+//===-- BuiltinGCs.h - Garbage collector linkage hacks --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains hack functions to force linking in the GC components.
+// This file contains hack functions to force linking in the builtin GC
+// components.
//
//===----------------------------------------------------------------------===//
@@ -15,32 +16,18 @@
#define LLVM_CODEGEN_GCS_H
namespace llvm {
-class GCStrategy;
-class GCMetadataPrinter;
/// FIXME: Collector instances are not useful on their own. These no longer
/// serve any purpose except to link in the plugins.
-/// Creates a CoreCLR-compatible garbage collector.
-void linkCoreCLRGC();
-
-/// Creates an ocaml-compatible garbage collector.
-void linkOcamlGC();
+/// Ensure the definition of the builtin GCs gets linked in
+void linkAllBuiltinGCs();
/// Creates an ocaml-compatible metadata printer.
void linkOcamlGCPrinter();
-/// Creates an erlang-compatible garbage collector.
-void linkErlangGC();
-
/// Creates an erlang-compatible metadata printer.
void linkErlangGCPrinter();
-
-/// Creates a shadow stack garbage collector. This collector requires no code
-/// generator support.
-void linkShadowStackGC();
-
-void linkStatepointExampleGC();
}
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/CommandFlags.inc b/contrib/llvm/include/llvm/CodeGen/CommandFlags.inc
index 7d2d167289e0..568d329a5e8c 100644
--- a/contrib/llvm/include/llvm/CodeGen/CommandFlags.inc
+++ b/contrib/llvm/include/llvm/CodeGen/CommandFlags.inc
@@ -74,7 +74,8 @@ static cl::opt<ThreadModel::Model> TMModel(
static cl::opt<llvm::CodeModel::Model> CMModel(
"code-model", cl::desc("Choose code model"),
- cl::values(clEnumValN(CodeModel::Small, "small", "Small code model"),
+ cl::values(clEnumValN(CodeModel::Tiny, "tiny", "Tiny code model"),
+ clEnumValN(CodeModel::Small, "small", "Small code model"),
clEnumValN(CodeModel::Kernel, "kernel", "Kernel code model"),
clEnumValN(CodeModel::Medium, "medium", "Medium code model"),
clEnumValN(CodeModel::Large, "large", "Large code model")));
@@ -113,10 +114,16 @@ static cl::opt<TargetMachine::CodeGenFileType> FileType(
clEnumValN(TargetMachine::CGFT_Null, "null",
"Emit nothing, for performance testing")));
-static cl::opt<bool>
- DisableFPElim("disable-fp-elim",
- cl::desc("Disable frame pointer elimination optimization"),
- cl::init(false));
+static cl::opt<llvm::FramePointer::FP> FramePointerUsage(
+ "frame-pointer", cl::desc("Specify frame pointer elimination optimization"),
+ cl::init(llvm::FramePointer::None),
+ cl::values(
+ clEnumValN(llvm::FramePointer::All, "all",
+ "Disable frame pointer elimination"),
+ clEnumValN(llvm::FramePointer::NonLeaf, "non-leaf",
+ "Disable frame pointer elimination for non-leaf frame"),
+ clEnumValN(llvm::FramePointer::None, "none",
+ "Enable frame pointer elimination")));
static cl::opt<bool> EnableUnsafeFPMath(
"enable-unsafe-fp-math",
@@ -367,9 +374,14 @@ setFunctionAttributes(StringRef CPU, StringRef Features, Module &M) {
NewAttrs.addAttribute("target-cpu", CPU);
if (!Features.empty())
NewAttrs.addAttribute("target-features", Features);
- if (DisableFPElim.getNumOccurrences() > 0)
- NewAttrs.addAttribute("no-frame-pointer-elim",
- DisableFPElim ? "true" : "false");
+ if (FramePointerUsage.getNumOccurrences() > 0) {
+ if (FramePointerUsage == llvm::FramePointer::All)
+ NewAttrs.addAttribute("frame-pointer", "all");
+ else if (FramePointerUsage == llvm::FramePointer::NonLeaf)
+ NewAttrs.addAttribute("frame-pointer", "non-leaf");
+ else if (FramePointerUsage == llvm::FramePointer::None)
+ NewAttrs.addAttribute("frame-pointer", "none");
+ }
if (DisableTailCalls.getNumOccurrences() > 0)
NewAttrs.addAttribute("disable-tail-calls",
toStringRef(DisableTailCalls));
diff --git a/contrib/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h b/contrib/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
new file mode 100644
index 000000000000..befc28f084e7
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
@@ -0,0 +1,87 @@
+//===- llvm/CodeGen/DbgEntityHistoryCalculator.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
+#define LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include <utility>
+
+namespace llvm {
+
+class DILocalVariable;
+class MachineFunction;
+class MachineInstr;
+class TargetRegisterInfo;
+
+// For each user variable, keep a list of instruction ranges where this variable
+// is accessible. The variables are listed in order of appearance.
+class DbgValueHistoryMap {
+ // Each instruction range starts with a DBG_VALUE instruction, specifying the
+ // location of a variable, which is assumed to be valid until the end of the
+ // range. If end is not specified, location is valid until the start
+ // instruction of the next instruction range, or until the end of the
+ // function.
+public:
+ using InstrRange = std::pair<const MachineInstr *, const MachineInstr *>;
+ using InstrRanges = SmallVector<InstrRange, 4>;
+ using InlinedEntity = std::pair<const DINode *, const DILocation *>;
+ using InstrRangesMap = MapVector<InlinedEntity, InstrRanges>;
+
+private:
+ InstrRangesMap VarInstrRanges;
+
+public:
+ void startInstrRange(InlinedEntity Var, const MachineInstr &MI);
+ void endInstrRange(InlinedEntity Var, const MachineInstr &MI);
+
+ // Returns register currently describing @Var. If @Var is currently
+ // unaccessible or is not described by a register, returns 0.
+ unsigned getRegisterForVar(InlinedEntity Var) const;
+
+ bool empty() const { return VarInstrRanges.empty(); }
+ void clear() { VarInstrRanges.clear(); }
+ InstrRangesMap::const_iterator begin() const { return VarInstrRanges.begin(); }
+ InstrRangesMap::const_iterator end() const { return VarInstrRanges.end(); }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const;
+#endif
+};
+
+/// For each inlined instance of a source-level label, keep the corresponding
+/// DBG_LABEL instruction. The DBG_LABEL instruction could be used to generate
+/// a temporary (assembler) label before it.
+class DbgLabelInstrMap {
+public:
+ using InlinedEntity = std::pair<const DINode *, const DILocation *>;
+ using InstrMap = MapVector<InlinedEntity, const MachineInstr *>;
+
+private:
+ InstrMap LabelInstr;
+
+public:
+ void addInstr(InlinedEntity Label, const MachineInstr &MI);
+
+ bool empty() const { return LabelInstr.empty(); }
+ void clear() { LabelInstr.clear(); }
+ InstrMap::const_iterator begin() const { return LabelInstr.begin(); }
+ InstrMap::const_iterator end() const { return LabelInstr.end(); }
+};
+
+void calculateDbgEntityHistory(const MachineFunction *MF,
+ const TargetRegisterInfo *TRI,
+ DbgValueHistoryMap &DbgValues,
+ DbgLabelInstrMap &DbgLabels);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DBGVALUEHISTORYCALCULATOR_H
diff --git a/contrib/llvm/include/llvm/CodeGen/DebugHandlerBase.h b/contrib/llvm/include/llvm/CodeGen/DebugHandlerBase.h
new file mode 100644
index 000000000000..4f0d14d317f2
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DebugHandlerBase.h
@@ -0,0 +1,138 @@
+//===-- llvm/CodeGen/DebugHandlerBase.h -----------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common functionality for different debug information format backends.
+// LLVM currently supports DWARF and CodeView.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DEBUGHANDLERBASE_H
+#define LLVM_CODEGEN_DEBUGHANDLERBASE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/CodeGen/AsmPrinterHandler.h"
+#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
+#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class MachineInstr;
+class MachineModuleInfo;
+
+/// Represents the location at which a variable is stored.
+struct DbgVariableLocation {
+ /// Base register.
+ unsigned Register;
+
+ /// Chain of offsetted loads necessary to load the value if it lives in
+ /// memory. Every load except for the last is pointer-sized.
+ SmallVector<int64_t, 1> LoadChain;
+
+ /// Present if the location is part of a larger variable.
+ llvm::Optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
+
+ /// Extract a VariableLocation from a MachineInstr.
+ /// This will only work if Instruction is a debug value instruction
+ /// and the associated DIExpression is in one of the supported forms.
+ /// If these requirements are not met, the returned Optional will not
+ /// have a value.
+ static Optional<DbgVariableLocation>
+ extractFromMachineInstruction(const MachineInstr &Instruction);
+};
+
+/// Base class for debug information backends. Common functionality related to
+/// tracking which variables and scopes are alive at a given PC live here.
+class DebugHandlerBase : public AsmPrinterHandler {
+protected:
+ DebugHandlerBase(AsmPrinter *A);
+
+ /// Target of debug info emission.
+ AsmPrinter *Asm;
+
+ /// Collected machine module information.
+ MachineModuleInfo *MMI;
+
+ /// Previous instruction's location information. This is used to
+ /// determine label location to indicate scope boundaries in debug info.
+ /// We track the previous instruction's source location (if not line 0),
+ /// whether it was a label, and its parent BB.
+ DebugLoc PrevInstLoc;
+ MCSymbol *PrevLabel = nullptr;
+ const MachineBasicBlock *PrevInstBB = nullptr;
+
+ /// This location indicates end of function prologue and beginning of
+ /// function body.
+ DebugLoc PrologEndLoc;
+
+ /// If nonnull, stores the current machine instruction we're processing.
+ const MachineInstr *CurMI = nullptr;
+
+ LexicalScopes LScopes;
+
+ /// History of DBG_VALUE and clobber instructions for each user
+ /// variable. Variables are listed in order of appearance.
+ DbgValueHistoryMap DbgValues;
+
+ /// Mapping of inlined labels to DBG_LABEL machine instructions.
+ DbgLabelInstrMap DbgLabels;
+
+ /// Maps instruction with label emitted before instruction.
+ /// FIXME: Make this private from DwarfDebug, we have the necessary accessors
+ /// for it.
+ DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
+
+ /// Maps instruction with label emitted after instruction.
+ DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;
+
+ /// Identify instructions that mark the beginning or
+ /// ending of a scope.
+ void identifyScopeMarkers();
+
+ /// Ensure that a label will be emitted before MI.
+ void requestLabelBeforeInsn(const MachineInstr *MI) {
+ LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
+ }
+
+ /// Ensure that a label will be emitted after MI.
+ void requestLabelAfterInsn(const MachineInstr *MI) {
+ LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
+ }
+
+ virtual void beginFunctionImpl(const MachineFunction *MF) = 0;
+ virtual void endFunctionImpl(const MachineFunction *MF) = 0;
+ virtual void skippedNonDebugFunction() {}
+
+ // AsmPrinterHandler overrides.
+public:
+ void beginInstruction(const MachineInstr *MI) override;
+ void endInstruction() override;
+
+ void beginFunction(const MachineFunction *MF) override;
+ void endFunction(const MachineFunction *MF) override;
+
+ /// Return Label preceding the instruction.
+ MCSymbol *getLabelBeforeInsn(const MachineInstr *MI);
+
+ /// Return Label immediately following the instruction.
+ MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
+
+ /// Return the function-local offset of an instruction. A label for the
+ /// instruction \p MI should exist (\ref getLabelAfterInsn).
+ const MCExpr *getFunctionLocalOffsetAfterInsn(const MachineInstr *MI);
+
+ /// If this type is derived from a base type then return base type size.
+ static uint64_t getBaseTypeSize(const DITypeRef TyRef);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h b/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h
index e6c0483cfc35..8b1a7af17bbf 100644
--- a/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h
+++ b/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -10,6 +10,7 @@
#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringMap.h"
namespace llvm {
@@ -18,34 +19,52 @@ class MCSymbol;
/// Data for a string pool entry.
struct DwarfStringPoolEntry {
+ static constexpr unsigned NotIndexed = -1;
+
MCSymbol *Symbol;
unsigned Offset;
unsigned Index;
+
+ bool isIndexed() const { return Index != NotIndexed; }
};
/// String pool entry reference.
-struct DwarfStringPoolEntryRef {
- const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
+class DwarfStringPoolEntryRef {
+ PointerIntPair<const StringMapEntry<DwarfStringPoolEntry> *, 1, bool>
+ MapEntryAndIndexed;
+
+ const StringMapEntry<DwarfStringPoolEntry> *getMapEntry() const {
+ return MapEntryAndIndexed.getPointer();
+ }
public:
DwarfStringPoolEntryRef() = default;
- explicit DwarfStringPoolEntryRef(
- const StringMapEntry<DwarfStringPoolEntry> &I)
- : I(&I) {}
+ DwarfStringPoolEntryRef(const StringMapEntry<DwarfStringPoolEntry> &Entry,
+ bool Indexed)
+ : MapEntryAndIndexed(&Entry, Indexed) {}
- explicit operator bool() const { return I; }
+ explicit operator bool() const { return getMapEntry(); }
MCSymbol *getSymbol() const {
- assert(I->second.Symbol && "No symbol available!");
- return I->second.Symbol;
+ assert(getMapEntry()->second.Symbol && "No symbol available!");
+ return getMapEntry()->second.Symbol;
}
- unsigned getOffset() const { return I->second.Offset; }
- unsigned getIndex() const { return I->second.Index; }
- StringRef getString() const { return I->first(); }
+ unsigned getOffset() const { return getMapEntry()->second.Offset; }
+ bool isIndexed() const { return MapEntryAndIndexed.getInt(); }
+ unsigned getIndex() const {
+ assert(isIndexed());
+ assert(getMapEntry()->getValue().isIndexed());
+ return getMapEntry()->second.Index;
+ }
+ StringRef getString() const { return getMapEntry()->first(); }
/// Return the entire string pool entry for convenience.
- DwarfStringPoolEntry getEntry() const { return I->getValue(); }
+ DwarfStringPoolEntry getEntry() const { return getMapEntry()->getValue(); }
- bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
- bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
+ bool operator==(const DwarfStringPoolEntryRef &X) const {
+ return getMapEntry() == X.getMapEntry();
+ }
+ bool operator!=(const DwarfStringPoolEntryRef &X) const {
+ return getMapEntry() != X.getMapEntry();
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 2da00b7d61ab..7c658515de09 100644
--- a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -1,4 +1,4 @@
-//===- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen ---===//
+//===- FunctionLoweringInfo.h - Lower functions from LLVM IR ---*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -246,6 +246,7 @@ public:
return 0;
unsigned &R = ValueMap[V];
assert(R == 0 && "Already initialized this value register!");
+ assert(VirtReg2Value.empty());
return R = CreateRegs(V->getType());
}
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
index ad2599fc120e..7fb27202c122 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -55,12 +55,11 @@ class MCSymbol;
/// GCPoint - Metadata for a collector-safe point in machine code.
///
struct GCPoint {
- GC::PointKind Kind; ///< The kind of the safe point.
MCSymbol *Label; ///< A label.
DebugLoc Loc;
- GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
- : Kind(K), Label(L), Loc(std::move(DL)) {}
+ GCPoint(MCSymbol *L, DebugLoc DL)
+ : Label(L), Loc(std::move(DL)) {}
};
/// GCRoot - Metadata for a pointer to an object managed by the garbage
@@ -124,8 +123,8 @@ public:
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
/// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
- void addSafePoint(GC::PointKind Kind, MCSymbol *Label, const DebugLoc &DL) {
- SafePoints.emplace_back(Kind, Label, DL);
+ void addSafePoint(MCSymbol *Label, const DebugLoc &DL) {
+ SafePoints.emplace_back(Label, DL);
}
/// getFrameSize/setFrameSize - Records the function's frame size.
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
index 1cc69a7b71af..5f1efb2ce02c 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -29,6 +29,7 @@ class GCMetadataPrinter;
class GCModuleInfo;
class GCStrategy;
class Module;
+class StackMaps;
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
@@ -60,6 +61,11 @@ public:
/// Called after the assembly for the module is generated by
/// the AsmPrinter (but before target specific hooks)
virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+
+ /// Called when the stack maps are generated. Return true if
+ /// stack maps with a custom format are generated. Otherwise
+ /// returns false and the default format will be used.
+ virtual bool emitStackMaps(StackMaps &SM, AsmPrinter &AP) { return false; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
index f835bacfb548..5a60cd7cb823 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
@@ -59,19 +59,6 @@ namespace llvm {
class Type;
-namespace GC {
-
-/// PointKind - Used to indicate whether the address of the call instruction
-/// or the address after the call instruction is listed in the stackmap. For
-/// most runtimes, PostCall safepoints are appropriate.
-///
-enum PointKind {
- PreCall, ///< Instr is a call instruction.
- PostCall ///< Instr is the return address of a call.
-};
-
-} // end namespace GC
-
/// GCStrategy describes a garbage collector algorithm's code generation
/// requirements, and provides overridable hooks for those needs which cannot
/// be abstractly described. GCStrategy objects must be looked up through
@@ -88,11 +75,7 @@ protected:
/// if set, none of the other options can be
/// anything but their default values.
- unsigned NeededSafePoints = 0; ///< Bitmask of required safe points.
- bool CustomReadBarriers = false; ///< Default is to insert loads.
- bool CustomWriteBarriers = false; ///< Default is to insert stores.
- bool CustomRoots = false; ///< Default is to pass through to backend.
- bool InitRoots= true; ///< If set, roots are nulled during lowering.
+ bool NeededSafePoints = false; ///< If set, calls are inferred to be safepoints.
bool UsesMetadata = false; ///< If set, backend must emit metadata tables.
public:
@@ -103,16 +86,6 @@ public:
/// name string specified on functions which use this strategy.
const std::string &getName() const { return Name; }
- /// By default, write barriers are replaced with simple store
- /// instructions. If true, you must provide a custom pass to lower
- /// calls to \@llvm.gcwrite.
- bool customWriteBarrier() const { return CustomWriteBarriers; }
-
- /// By default, read barriers are replaced with simple load
- /// instructions. If true, you must provide a custom pass to lower
- /// calls to \@llvm.gcread.
- bool customReadBarrier() const { return CustomReadBarriers; }
-
/// Returns true if this strategy is expecting the use of gc.statepoints,
/// and false otherwise.
bool useStatepoints() const { return UseStatepoints; }
@@ -135,25 +108,8 @@ public:
*/
///@{
- /// True if safe points of any kind are required. By default, none are
- /// recorded.
- bool needsSafePoints() const { return NeededSafePoints != 0; }
-
- /// True if the given kind of safe point is required. By default, none are
- /// recorded.
- bool needsSafePoint(GC::PointKind Kind) const {
- return (NeededSafePoints & 1 << Kind) != 0;
- }
-
- /// By default, roots are left for the code generator so it can generate a
- /// stack map. If true, you must provide a custom pass to lower
- /// calls to \@llvm.gcroot.
- bool customRoots() const { return CustomRoots; }
-
- /// If set, gcroot intrinsics should initialize their allocas to null
- /// before the first use. This is necessary for most GCs and is enabled by
- /// default.
- bool initializeRoots() const { return InitRoots; }
+ /// True if safe points need to be inferred on call sites
+ bool needsSafePoints() const { return NeededSafePoints; }
/// If set, appropriate metadata tables must be emitted by the back-end
/// (assembler, JIT, or otherwise). For statepoint, this method is
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h
new file mode 100644
index 000000000000..ce2d285a99e5
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -0,0 +1,237 @@
+//===- llvm/CodeGen/GlobalISel/CSEInfo.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for continuously CSEing during GISel passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
+/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
+/// UniqueMachineInstr vs making MachineInstr bigger.
+class UniqueMachineInstr : public FoldingSetNode {
+ friend class GISelCSEInfo;
+ const MachineInstr *MI;
+ explicit UniqueMachineInstr(const MachineInstr *MI) : MI(MI) {}
+
+public:
+ void Profile(FoldingSetNodeID &ID);
+};
+
+// Class representing some configuration that can be done during CSE analysis.
+// Currently it only supports shouldCSE method that each pass can set.
+class CSEConfig {
+public:
+ virtual ~CSEConfig() = default;
+ // Hook for defining which Generic instructions should be CSEd.
+ // GISelCSEInfo currently only calls this hook when dealing with generic
+ // opcodes.
+ virtual bool shouldCSEOpc(unsigned Opc);
+};
+
+// TODO: Find a better place for this.
+// Commonly used for O0 config.
+class CSEConfigConstantOnly : public CSEConfig {
+public:
+ virtual ~CSEConfigConstantOnly() = default;
+ virtual bool shouldCSEOpc(unsigned Opc) override;
+};
+
+/// The CSE Analysis object.
+/// This installs itself as a delegate to the MachineFunction to track
+/// new instructions as well as deletions. It however will not be able to
+/// track instruction mutations. In such cases, recordNewInstruction should be
+/// called (for eg inside MachineIRBuilder::recordInsertion).
+/// Also because of how just the instruction can be inserted without adding any
+/// operands to the instruction, instructions are uniqued and inserted lazily.
+/// CSEInfo should assert when trying to enter an incomplete instruction into
+/// the CSEMap. There is Opcode level granularity on which instructions can be
+/// CSE'd and for now, only Generic instructions are CSEable.
+class GISelCSEInfo : public GISelChangeObserver {
+ // Make it accessible only to CSEMIRBuilder.
+ friend class CSEMIRBuilder;
+
+ BumpPtrAllocator UniqueInstrAllocator;
+ FoldingSet<UniqueMachineInstr> CSEMap;
+ MachineRegisterInfo *MRI = nullptr;
+ MachineFunction *MF = nullptr;
+ std::unique_ptr<CSEConfig> CSEOpt;
+ /// Keep a cache of UniqueInstrs for each MachineInstr. In GISel,
+ /// often instructions are mutated (while their ID has completely changed).
+ /// Whenever mutation happens, invalidate the UniqueMachineInstr for the
+ /// MachineInstr
+ DenseMap<const MachineInstr *, UniqueMachineInstr *> InstrMapping;
+
+ /// Store instructions that are not fully formed in TemporaryInsts.
+ /// Also because CSE insertion happens lazily, we can remove insts from this
+ /// list and avoid inserting and then removing from the CSEMap.
+ GISelWorkList<8> TemporaryInsts;
+
+ // Only used in asserts.
+ DenseMap<unsigned, unsigned> OpcodeHitTable;
+
+ bool isUniqueMachineInstValid(const UniqueMachineInstr &UMI) const;
+
+ void invalidateUniqueMachineInstr(UniqueMachineInstr *UMI);
+
+ UniqueMachineInstr *getNodeIfExists(FoldingSetNodeID &ID,
+ MachineBasicBlock *MBB, void *&InsertPos);
+
+ /// Allocate and construct a new UniqueMachineInstr for MI and return.
+ UniqueMachineInstr *getUniqueInstrForMI(const MachineInstr *MI);
+
+ void insertNode(UniqueMachineInstr *UMI, void *InsertPos = nullptr);
+
+ /// Get the MachineInstr(Unique) if it exists already in the CSEMap and the
+ /// same MachineBasicBlock.
+ MachineInstr *getMachineInstrIfExists(FoldingSetNodeID &ID,
+ MachineBasicBlock *MBB,
+ void *&InsertPos);
+
+ /// Use this method to allocate a new UniqueMachineInstr for MI and insert it
+ /// into the CSEMap. MI should return true for shouldCSE(MI->getOpcode())
+ void insertInstr(MachineInstr *MI, void *InsertPos = nullptr);
+
+public:
+ GISelCSEInfo() = default;
+
+ virtual ~GISelCSEInfo();
+
+ void setMF(MachineFunction &MF);
+
+ /// Records a newly created inst in a list and lazily insert it to the CSEMap.
+ /// Sometimes, this method might be called with a partially constructed
+ /// MachineInstr,
+ // (right after BuildMI without adding any operands) - and in such cases,
+ // defer the hashing of the instruction to a later stage.
+ void recordNewInstruction(MachineInstr *MI);
+
+ /// Use this callback to inform CSE about a newly fully created instruction.
+ void handleRecordedInst(MachineInstr *MI);
+
+ /// Use this callback to insert all the recorded instructions. At this point,
+ /// all of these insts need to be fully constructed and should not be missing
+ /// any operands.
+ void handleRecordedInsts();
+
+ /// Remove this inst from the CSE map. If this inst has not been inserted yet,
+ /// it will be removed from the Tempinsts list if it exists.
+ void handleRemoveInst(MachineInstr *MI);
+
+ void releaseMemory();
+
+ void setCSEConfig(std::unique_ptr<CSEConfig> Opt) { CSEOpt = std::move(Opt); }
+
+ bool shouldCSE(unsigned Opc) const;
+
+ void analyze(MachineFunction &MF);
+
+ void countOpcodeHit(unsigned Opc);
+
+ void print();
+
+ // Observer API
+ void erasingInstr(MachineInstr &MI) override;
+ void createdInstr(MachineInstr &MI) override;
+ void changingInstr(MachineInstr &MI) override;
+ void changedInstr(MachineInstr &MI) override;
+};
+
+class TargetRegisterClass;
+class RegisterBank;
+
+// Simple builder class to easily profile properties about MIs.
+class GISelInstProfileBuilder {
+ FoldingSetNodeID &ID;
+ const MachineRegisterInfo &MRI;
+
+public:
+ GISelInstProfileBuilder(FoldingSetNodeID &ID, const MachineRegisterInfo &MRI)
+ : ID(ID), MRI(MRI) {}
+ // Profiling methods.
+ const GISelInstProfileBuilder &addNodeIDOpcode(unsigned Opc) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const LLT &Ty) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const unsigned) const;
+
+ const GISelInstProfileBuilder &
+ addNodeIDRegType(const TargetRegisterClass *RC) const;
+ const GISelInstProfileBuilder &addNodeIDRegType(const RegisterBank *RB) const;
+
+ const GISelInstProfileBuilder &addNodeIDRegNum(unsigned Reg) const;
+
+ const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
+ const GISelInstProfileBuilder &
+ addNodeIDMBB(const MachineBasicBlock *MBB) const;
+
+ const GISelInstProfileBuilder &
+ addNodeIDMachineOperand(const MachineOperand &MO) const;
+
+ const GISelInstProfileBuilder &addNodeIDFlag(unsigned Flag) const;
+ const GISelInstProfileBuilder &addNodeID(const MachineInstr *MI) const;
+};
+
+/// Simple wrapper that does the following.
+/// 1) Lazily evaluate the MachineFunction to compute CSEable instructions.
+/// 2) Allows configuration of which instructions are CSEd through CSEConfig
+/// object. Provides a method called get which takes a CSEConfig object.
+class GISelCSEAnalysisWrapper {
+ GISelCSEInfo Info;
+ MachineFunction *MF = nullptr;
+ bool AlreadyComputed = false;
+
+public:
+ /// Takes a CSEConfig object that defines what opcodes get CSEd.
+ /// If CSEConfig is already set, and the CSE Analysis has been preserved,
+ /// it will not use the new CSEOpt (use ReCompute to force using the new
+ /// CSEOpt).
+ GISelCSEInfo &get(std::unique_ptr<CSEConfig> CSEOpt, bool ReCompute = false);
+ void setMF(MachineFunction &MFunc) { MF = &MFunc; }
+ void setComputed(bool Computed) { AlreadyComputed = Computed; }
+ void releaseMemory() { Info.releaseMemory(); }
+};
+
+/// The actual analysis pass wrapper.
+class GISelCSEAnalysisWrapperPass : public MachineFunctionPass {
+ GISelCSEAnalysisWrapper Wrapper;
+
+public:
+ static char ID;
+ GISelCSEAnalysisWrapperPass() : MachineFunctionPass(ID) {
+ initializeGISelCSEAnalysisWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ const GISelCSEAnalysisWrapper &getCSEWrapper() const { return Wrapper; }
+ GISelCSEAnalysisWrapper &getCSEWrapper() { return Wrapper; }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void releaseMemory() override {
+ Wrapper.releaseMemory();
+ Wrapper.setComputed(false);
+ }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
new file mode 100644
index 000000000000..a8fb736ebbb5
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
@@ -0,0 +1,110 @@
+//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.h --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which CSEs insts within
+/// a MachineBasicBlock.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+/// Defines a builder that does CSE of MachineInstructions using GISelCSEInfo.
+/// Eg usage.
+///
+///
+/// GISelCSEInfo *Info =
+///     &getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEInfo();
+/// CSEMIRBuilder CB(Builder.getState()); CB.setCSEInfo(Info);
+/// auto A = CB.buildConstant(s32, 42); auto B = CB.buildConstant(s32, 42);
+/// assert(A == B); unsigned CReg = MRI.createGenericVirtualRegister(s32);
+/// auto C = CB.buildConstant(CReg, 42); assert(C->getOpcode() == TargetOpcode::COPY);
+/// Explicitly passing in a register would materialize a copy if possible.
+/// CSEMIRBuilder also does trivial constant folding for binary ops.
+class CSEMIRBuilder : public MachineIRBuilder {
+
+ /// Returns true if A dominates B (within the same basic block).
+ /// Both iterators must be in the same basic block.
+ //
+ // TODO: Another approach for checking dominance is having two iterators and
+ // making them go towards each other until they meet or reach begin/end. Which
+ // approach is better? Should this even change dynamically? For G_CONSTANTS
+ // most of which will be at the top of the BB, the top down approach would be
+ // a better choice. Does IRTranslator placing constants at the beginning still
+ // make sense? Should this change based on Opcode?
+ bool dominates(MachineBasicBlock::const_iterator A,
+ MachineBasicBlock::const_iterator B) const;
+
+ /// For given ID, find a machineinstr in the CSE Map. If found, check if it
+ /// dominates the current insertion point and if not, move it just before the
+ /// current insertion point and return it. If not found, return Null
+ /// MachineInstrBuilder.
+ MachineInstrBuilder getDominatingInstrForID(FoldingSetNodeID &ID,
+ void *&NodeInsertPos);
+ /// Simple check if we can CSE (we have the CSEInfo) or if this Opcode is
+ /// safe to CSE.
+ bool canPerformCSEForOpc(unsigned Opc) const;
+
+ void profileDstOp(const DstOp &Op, GISelInstProfileBuilder &B) const;
+
+ void profileDstOps(ArrayRef<DstOp> Ops, GISelInstProfileBuilder &B) const {
+ for (const DstOp &Op : Ops)
+ profileDstOp(Op, B);
+ }
+
+ void profileSrcOp(const SrcOp &Op, GISelInstProfileBuilder &B) const;
+
+ void profileSrcOps(ArrayRef<SrcOp> Ops, GISelInstProfileBuilder &B) const {
+ for (const SrcOp &Op : Ops)
+ profileSrcOp(Op, B);
+ }
+
+ void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;
+
+ void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps, Optional<unsigned> Flags,
+ GISelInstProfileBuilder &B) const;
+
+ // Takes a MachineInstrBuilder and inserts it into the CSEMap using the
+ // NodeInsertPos.
+ MachineInstrBuilder memoizeMI(MachineInstrBuilder MIB, void *NodeInsertPos);
+
+ // If we can CSE an instruction, but still need to materialize to a VReg,
+ // we emit a copy from the CSE'd inst to the VReg.
+ MachineInstrBuilder generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
+ MachineInstrBuilder &MIB);
+
+ // If we can CSE an instruction, but still need to materialize to a VReg,
+ // check if we can generate copies. It's not possible to return a single MIB,
+ // while emitting copies to multiple vregs.
+ bool checkCopyToDefsPossible(ArrayRef<DstOp> DstOps);
+
+public:
+ // Pull in base class constructors.
+ using MachineIRBuilder::MachineIRBuilder;
+ // Unhide buildInstr
+ MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flag = None) override;
+ // Bring in the other overload from the base class.
+ using MachineIRBuilder::buildConstant;
+
+ MachineInstrBuilder buildConstant(const DstOp &Res,
+ const ConstantInt &Val) override;
+
+ // Bring in the other overload from the base class.
+ using MachineIRBuilder::buildFConstant;
+ MachineInstrBuilder buildFConstant(const DstOp &Res,
+ const ConstantFP &Val) override;
+};
+} // namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 58eb412d8c24..ab498e8f070b 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -40,6 +40,7 @@ class Value;
class CallLowering {
const TargetLowering *TLI;
+ virtual void anchor();
public:
struct ArgInfo {
unsigned Reg;
@@ -108,6 +109,9 @@ public:
MachineIRBuilder &MIRBuilder;
MachineRegisterInfo &MRI;
CCAssignFn *AssignFn;
+
+ private:
+ virtual void anchor();
};
protected:
@@ -138,12 +142,12 @@ public:
virtual ~CallLowering() = default;
/// This hook must be implemented to lower outgoing return values, described
- /// by \p Val, into the specified virtual register \p VReg.
+ /// by \p Val, into the specified virtual registers \p VRegs.
/// This hook is used by GlobalISel.
///
/// \return True if the lowering succeeds, false otherwise.
- virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
- const Value *Val, unsigned VReg) const {
+ virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+ ArrayRef<unsigned> VRegs) const {
return false;
}
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
index 36a33deb4a64..b097c7817762 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -21,6 +21,7 @@
namespace llvm {
class MachineRegisterInfo;
class CombinerInfo;
+class GISelCSEInfo;
class TargetPassConfig;
class MachineFunction;
@@ -28,14 +29,17 @@ class Combiner {
public:
Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
- bool combineMachineInstrs(MachineFunction &MF);
+ /// If CSEInfo is not null, then the Combiner will setup observer for
+ /// CSEInfo and instantiate a CSEMIRBuilder. Pass nullptr if CSE is not
+ /// needed.
+ bool combineMachineInstrs(MachineFunction &MF, GISelCSEInfo *CSEInfo);
protected:
CombinerInfo &CInfo;
MachineRegisterInfo *MRI = nullptr;
const TargetPassConfig *TPC;
- MachineIRBuilder Builder;
+ std::unique_ptr<MachineIRBuilder> Builder;
};
} // End namespace llvm.
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 5d5b8398452c..6e9ac01c1ee2 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -1,4 +1,4 @@
-//== llvm/CodeGen/GlobalISel/CombinerHelper.h -------------- -*- C++ -*-==//
+//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,21 +20,36 @@
namespace llvm {
+class GISelChangeObserver;
class MachineIRBuilder;
class MachineRegisterInfo;
class MachineInstr;
+class MachineOperand;
class CombinerHelper {
MachineIRBuilder &Builder;
MachineRegisterInfo &MRI;
+ GISelChangeObserver &Observer;
public:
- CombinerHelper(MachineIRBuilder &B);
+ CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B);
+
+ /// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
+ void replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg, unsigned ToReg) const;
+
+ /// Replace a single register operand with a new register and inform the
+ /// observer of the changes.
+ void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
+ unsigned ToReg) const;
/// If \p MI is COPY, try to combine it.
/// Returns true if MI changed.
bool tryCombineCopy(MachineInstr &MI);
+ /// If \p MI is extend that consumes the result of a load, try to combine it.
+ /// Returns true if MI changed.
+ bool tryCombineExtendingLoads(MachineInstr &MI);
+
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
bool tryCombine(MachineInstr &MI);
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
index 1d248547adbf..d21aa3f725d9 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -17,10 +17,12 @@
#include <cassert>
namespace llvm {
+class GISelChangeObserver;
class LegalizerInfo;
class MachineInstr;
class MachineIRBuilder;
class MachineRegisterInfo;
+
// Contains information relevant to enabling/disabling various combines for a
// pass.
class CombinerInfo {
@@ -41,7 +43,19 @@ public:
/// illegal ops that are created.
bool LegalizeIllegalOps; // TODO: Make use of this.
const LegalizerInfo *LInfo;
- virtual bool combine(MachineInstr &MI, MachineIRBuilder &B) const = 0;
+
+ /// Attempt to combine instructions using MI as the root.
+ ///
+ /// Use Observer to report the creation, modification, and erasure of
+ /// instructions. GISelChangeObserver will automatically report certain
+ /// kinds of operations. These operations are:
+ /// * Instructions that are newly inserted into the MachineFunction
+ /// * Instructions that are erased from the MachineFunction.
+ ///
+ /// However, it is important to report instruction modification and this is
+ /// not automatic.
+ virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const = 0;
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
index 8d61f9a68279..220a571b21db 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -15,91 +15,20 @@
namespace llvm {
-static Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
- const unsigned Op2,
- const MachineRegisterInfo &MRI) {
- auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
- auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
- if (MaybeOp1Cst && MaybeOp2Cst) {
- LLT Ty = MRI.getType(Op1);
- APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
- APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
- switch (Opcode) {
- default:
- break;
- case TargetOpcode::G_ADD:
- return C1 + C2;
- case TargetOpcode::G_AND:
- return C1 & C2;
- case TargetOpcode::G_ASHR:
- return C1.ashr(C2);
- case TargetOpcode::G_LSHR:
- return C1.lshr(C2);
- case TargetOpcode::G_MUL:
- return C1 * C2;
- case TargetOpcode::G_OR:
- return C1 | C2;
- case TargetOpcode::G_SHL:
- return C1 << C2;
- case TargetOpcode::G_SUB:
- return C1 - C2;
- case TargetOpcode::G_XOR:
- return C1 ^ C2;
- case TargetOpcode::G_UDIV:
- if (!C2.getBoolValue())
- break;
- return C1.udiv(C2);
- case TargetOpcode::G_SDIV:
- if (!C2.getBoolValue())
- break;
- return C1.sdiv(C2);
- case TargetOpcode::G_UREM:
- if (!C2.getBoolValue())
- break;
- return C1.urem(C2);
- case TargetOpcode::G_SREM:
- if (!C2.getBoolValue())
- break;
- return C1.srem(C2);
- }
- }
- return None;
-}
-
/// An MIRBuilder which does trivial constant folding of binary ops.
/// Calls to buildInstr will also try to constant fold binary ops.
-class ConstantFoldingMIRBuilder
- : public FoldableInstructionsBuilder<ConstantFoldingMIRBuilder> {
+class ConstantFoldingMIRBuilder : public MachineIRBuilder {
public:
// Pull in base class constructors.
- using FoldableInstructionsBuilder<
- ConstantFoldingMIRBuilder>::FoldableInstructionsBuilder;
- // Unhide buildInstr
- using FoldableInstructionsBuilder<ConstantFoldingMIRBuilder>::buildInstr;
+ using MachineIRBuilder::MachineIRBuilder;
- // Implement buildBinaryOp required by FoldableInstructionsBuilder which
- // tries to constant fold.
- MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
- unsigned Src0, unsigned Src1) {
- validateBinaryOp(Dst, Src0, Src1);
- auto MaybeCst = ConstantFoldBinOp(Opcode, Src0, Src1, getMF().getRegInfo());
- if (MaybeCst)
- return buildConstant(Dst, MaybeCst->getSExtValue());
- return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
- }
-
- template <typename DstTy, typename UseArg1Ty, typename UseArg2Ty>
- MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty, UseArg1Ty &&Arg1,
- UseArg2Ty &&Arg2) {
- unsigned Dst = getDestFromArg(Ty);
- return buildInstr(Opc, Dst, getRegFromArg(std::forward<UseArg1Ty>(Arg1)),
- getRegFromArg(std::forward<UseArg2Ty>(Arg2)));
- }
+ virtual ~ConstantFoldingMIRBuilder() = default;
// Try to provide an overload for buildInstr for binary ops in order to
// constant fold.
- MachineInstrBuilder buildInstr(unsigned Opc, unsigned Dst, unsigned Src0,
- unsigned Src1) {
+ MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flags = None) override {
switch (Opc) {
default:
break;
@@ -116,19 +45,18 @@ public:
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UREM:
case TargetOpcode::G_SREM: {
- return buildBinaryOp(Opc, Dst, Src0, Src1);
+ assert(DstOps.size() == 1 && "Invalid dst ops");
+ assert(SrcOps.size() == 2 && "Invalid src ops");
+ const DstOp &Dst = DstOps[0];
+ const SrcOp &Src0 = SrcOps[0];
+ const SrcOp &Src1 = SrcOps[1];
+ if (auto MaybeCst =
+ ConstantFoldBinOp(Opc, Src0.getReg(), Src1.getReg(), *getMRI()))
+ return buildConstant(Dst, MaybeCst->getSExtValue());
+ break;
}
}
- return buildInstr(Opc).addDef(Dst).addUse(Src0).addUse(Src1);
- }
-
- // Fallback implementation of buildInstr.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
- UseArgsTy &&... Args) {
- auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
- addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
- return MIB;
+ return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps);
}
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
new file mode 100644
index 000000000000..c8e8a7a5a7cb
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -0,0 +1,111 @@
+//===----- llvm/CodeGen/GlobalISel/GISelChangeObserver.h ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to allow clients to notify changes to machine
+/// instr.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
+#define LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+class MachineInstr;
+class MachineRegisterInfo;
+
+/// Abstract class that contains various methods for clients to notify about
+/// changes. This should be the preferred way for APIs to notify changes.
+/// Typically calling erasingInstr/createdInstr multiple times should not affect
+/// the result. The observer would likely need to check if it was already
+/// notified earlier (consider using GISelWorkList).
+class GISelChangeObserver {
+ SmallPtrSet<MachineInstr *, 4> ChangingAllUsesOfReg;
+
+public:
+ virtual ~GISelChangeObserver() {}
+
+ /// An instruction is about to be erased.
+ virtual void erasingInstr(MachineInstr &MI) = 0;
+ /// An instruction was created and inserted into the function.
+ virtual void createdInstr(MachineInstr &MI) = 0;
+ /// This instruction is about to be mutated in some way.
+ virtual void changingInstr(MachineInstr &MI) = 0;
+ /// This instruction was mutated in some way.
+ virtual void changedInstr(MachineInstr &MI) = 0;
+
+ /// All the instructions using the given register are being changed.
+ /// For convenience, finishedChangingAllUsesOfReg() will report the completion
+ /// of the changes. The use list may change between this call and
+ /// finishedChangingAllUsesOfReg().
+ void changingAllUsesOfReg(const MachineRegisterInfo &MRI, unsigned Reg);
+ /// All instructions reported as changing by changingAllUsesOfReg() have
+ /// finished being changed.
+ void finishedChangingAllUsesOfReg();
+
+};
+
+/// Simple wrapper observer that takes several observers, and calls
+/// each one for each event. If there are multiple observers (say CSE,
+/// Legalizer, Combiner), it's sufficient to register this to the machine
+/// function as the delegate.
+class GISelObserverWrapper : public MachineFunction::Delegate,
+ public GISelChangeObserver {
+ SmallVector<GISelChangeObserver *, 4> Observers;
+
+public:
+ GISelObserverWrapper() = default;
+ GISelObserverWrapper(ArrayRef<GISelChangeObserver *> Obs)
+ : Observers(Obs.begin(), Obs.end()) {}
+ // Adds an observer.
+ void addObserver(GISelChangeObserver *O) { Observers.push_back(O); }
+ // Removes an observer from the list and does nothing if observer is not
+ // present.
+ void removeObserver(GISelChangeObserver *O) {
+ auto It = std::find(Observers.begin(), Observers.end(), O);
+ if (It != Observers.end())
+ Observers.erase(It);
+ }
+ // API for Observer.
+ void erasingInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->erasingInstr(MI);
+ }
+ void createdInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->createdInstr(MI);
+ }
+ void changingInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->changingInstr(MI);
+ }
+ void changedInstr(MachineInstr &MI) override {
+ for (auto &O : Observers)
+ O->changedInstr(MI);
+ }
+ // API for MachineFunction::Delegate
+ void MF_HandleInsertion(MachineInstr &MI) override { createdInstr(MI); }
+ void MF_HandleRemoval(MachineInstr &MI) override { erasingInstr(MI); }
+};
+
+/// A simple RAII based CSEInfo installer.
+/// Use this in a scope to install a delegate to the MachineFunction and reset
+/// it at the end of the scope.
+class RAIIDelegateInstaller {
+ MachineFunction &MF;
+ MachineFunction::Delegate *Delegate;
+
+public:
+ RAIIDelegateInstaller(MachineFunction &MF, MachineFunction::Delegate *Del);
+ ~RAIIDelegateInstaller();
+};
+
+} // namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
index 167905dc9aa1..1571841a208d 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -12,38 +12,42 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/Debug.h"
namespace llvm {
class MachineInstr;
+class MachineFunction;
-// Worklist which mostly works similar to InstCombineWorkList, but on MachineInstrs.
-// The main difference with something like a SetVector is that erasing an element doesn't
-// move all elements over one place - instead just nulls out the element of the vector.
-// FIXME: Does it make sense to factor out common code with the instcombinerWorkList?
+// Worklist which mostly works similar to InstCombineWorkList, but on
+// MachineInstrs. The main difference with something like a SetVector is that
+// erasing an element doesn't move all elements over one place - instead just
+// nulls out the element of the vector.
+//
+// FIXME: Does it make sense to factor out common code with the
+// instcombinerWorkList?
template<unsigned N>
class GISelWorkList {
- SmallVector<MachineInstr*, N> Worklist;
- DenseMap<MachineInstr*, unsigned> WorklistMap;
+ SmallVector<MachineInstr *, N> Worklist;
+ DenseMap<MachineInstr *, unsigned> WorklistMap;
public:
- GISelWorkList() = default;
+ GISelWorkList() {}
bool empty() const { return WorklistMap.empty(); }
unsigned size() const { return WorklistMap.size(); }
- /// Add - Add the specified instruction to the worklist if it isn't already
- /// in it.
+ /// Add the specified instruction to the worklist if it isn't already in it.
void insert(MachineInstr *I) {
- if (WorklistMap.try_emplace(I, Worklist.size()).second) {
+ if (WorklistMap.try_emplace(I, Worklist.size()).second)
Worklist.push_back(I);
- }
}
- /// Remove - remove I from the worklist if it exists.
- void remove(MachineInstr *I) {
+ /// Remove I from the worklist if it exists.
+ void remove(const MachineInstr *I) {
auto It = WorklistMap.find(I);
if (It == WorklistMap.end()) return; // Not in worklist.
@@ -53,6 +57,11 @@ public:
WorklistMap.erase(It);
}
+ void clear() {
+ Worklist.clear();
+ WorklistMap.clear();
+ }
+
MachineInstr *pop_back_val() {
MachineInstr *I;
do {
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 2498ee933210..d1770bf6e4ce 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -21,11 +21,11 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Types.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Support/Allocator.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Allocator.h"
#include <memory>
#include <utility>
@@ -300,6 +300,8 @@ private:
bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
+ bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
+
bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
}
@@ -442,11 +444,13 @@ private:
// I.e., compared to regular MIBuilder, this one also inserts the instruction
// in the current block, it can creates block, etc., basically a kind of
// IRBuilder, but for Machine IR.
- MachineIRBuilder CurBuilder;
+ // CSEMIRBuilder CurBuilder;
+ std::unique_ptr<MachineIRBuilder> CurBuilder;
// Builder set to the entry block (just after ABI lowering instructions). Used
// as a convenient location for Constants.
- MachineIRBuilder EntryBuilder;
+ // CSEMIRBuilder EntryBuilder;
+ std::unique_ptr<MachineIRBuilder> EntryBuilder;
// The MachineFunction currently being translated.
MachineFunction *MF;
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 873587651efd..20bec7650179 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h --===========//
+//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h -----*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -14,12 +14,14 @@
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "legalizer"
+using namespace llvm::MIPatternMatch;
namespace llvm {
class LegalizationArtifactCombiner {
@@ -36,15 +38,29 @@ public:
SmallVectorImpl<MachineInstr *> &DeadInsts) {
if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
return false;
- if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
- MI.getOperand(1).getReg(), MRI)) {
+
+ Builder.setInstr(MI);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+ // aext(trunc x) - > aext/copy/trunc x
+ unsigned TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = DefMI->getOperand(1).getReg();
- Builder.setInstr(MI);
- // We get a copy/trunc/extend depending on the sizes
- Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
- markInstAndDefDead(MI, *DefMI, DeadInsts);
+ Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+ return true;
+ }
+
+ // aext([asz]ext x) -> [asz]ext x
+ unsigned ExtSrc;
+ MachineInstr *ExtMI;
+ if (mi_match(SrcReg, MRI,
+ m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
+ m_GSExt(m_Reg(ExtSrc)),
+ m_GZExt(m_Reg(ExtSrc)))))) {
+ Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
+ markInstAndDefDead(MI, *ExtMI, DeadInsts);
return true;
}
return tryFoldImplicitDef(MI, DeadInsts);
@@ -55,24 +71,25 @@ public:
if (MI.getOpcode() != TargetOpcode::G_ZEXT)
return false;
- if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
- MI.getOperand(1).getReg(), MRI)) {
- unsigned DstReg = MI.getOperand(0).getReg();
+
+ Builder.setInstr(MI);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+ // zext(trunc x) - > and (aext/copy/trunc x), mask
+ unsigned TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
- Builder.setInstr(MI);
- unsigned ZExtSrc = MI.getOperand(1).getReg();
- LLT ZExtSrcTy = MRI.getType(ZExtSrc);
- APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
- auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
- unsigned TruncSrc = DefMI->getOperand(1).getReg();
- // We get a copy/trunc/extend depending on the sizes
- auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
- Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
- markInstAndDefDead(MI, *DefMI, DeadInsts);
+ LLT SrcTy = MRI.getType(SrcReg);
+ APInt Mask = APInt::getAllOnesValue(SrcTy.getSizeInBits());
+ auto MIBMask = Builder.buildConstant(DstTy, Mask.getZExtValue());
+ Builder.buildAnd(DstReg, Builder.buildAnyExtOrTrunc(DstTy, TruncSrc),
+ MIBMask);
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
return true;
}
return tryFoldImplicitDef(MI, DeadInsts);
@@ -83,33 +100,34 @@ public:
if (MI.getOpcode() != TargetOpcode::G_SEXT)
return false;
- if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
- MI.getOperand(1).getReg(), MRI)) {
- unsigned DstReg = MI.getOperand(0).getReg();
+
+ Builder.setInstr(MI);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+ // sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c
+ unsigned TruncSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
- Builder.setInstr(MI);
- unsigned SExtSrc = MI.getOperand(1).getReg();
- LLT SExtSrcTy = MRI.getType(SExtSrc);
- unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
- auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
- unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
- // We get a copy/trunc/extend depending on the sizes
- auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
- auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
- SrcCopyExtOrTrunc, SizeDiffMIB);
- Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
- markInstAndDefDead(MI, *DefMI, DeadInsts);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned ShAmt = DstTy.getSizeInBits() - SrcTy.getSizeInBits();
+ auto MIBShAmt = Builder.buildConstant(DstTy, ShAmt);
+ auto MIBShl = Builder.buildInstr(
+ TargetOpcode::G_SHL, {DstTy},
+ {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), MIBShAmt});
+ Builder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {MIBShl, MIBShAmt});
+ markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
return true;
}
return tryFoldImplicitDef(MI, DeadInsts);
}
- /// Try to fold sb = EXTEND (G_IMPLICIT_DEF sa) -> sb = G_IMPLICIT_DEF
+ /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
bool tryFoldImplicitDef(MachineInstr &MI,
SmallVectorImpl<MachineInstr *> &DeadInsts) {
unsigned Opcode = MI.getOpcode();
@@ -119,13 +137,25 @@ public:
if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
MI.getOperand(1).getReg(), MRI)) {
+ Builder.setInstr(MI);
unsigned DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
- return false;
- LLVM_DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
- Builder.setInstr(MI);
- Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
+
+ if (Opcode == TargetOpcode::G_ANYEXT) {
+ // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
+ if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine G_ANYEXT(G_IMPLICIT_DEF): " << MI;);
+ Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
+ } else {
+ // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0 because the top
+ // bits will be 0 for G_ZEXT and 0/1 for the G_SEXT.
+ if (isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+ return false;
+ LLVM_DEBUG(dbgs() << ".. Combine G_[SZ]EXT(G_IMPLICIT_DEF): " << MI;);
+ Builder.buildConstant(DstReg, 0);
+ }
+
markInstAndDefDead(MI, *DefMI, DeadInsts);
return true;
}
@@ -139,8 +169,20 @@ public:
return false;
unsigned NumDefs = MI.getNumOperands() - 1;
- MachineInstr *MergeI = getOpcodeDef(TargetOpcode::G_MERGE_VALUES,
- MI.getOperand(NumDefs).getReg(), MRI);
+
+ unsigned MergingOpcode;
+ LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
+ LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+ if (OpTy.isVector() && DestTy.isVector())
+ MergingOpcode = TargetOpcode::G_CONCAT_VECTORS;
+ else if (OpTy.isVector() && !DestTy.isVector())
+ MergingOpcode = TargetOpcode::G_BUILD_VECTOR;
+ else
+ MergingOpcode = TargetOpcode::G_MERGE_VALUES;
+
+ MachineInstr *MergeI =
+ getOpcodeDef(MergingOpcode, MI.getOperand(NumDefs).getReg(), MRI);
+
if (!MergeI)
return false;
@@ -277,6 +319,19 @@ private:
auto Step = LI.getAction(Query);
return Step.Action == Unsupported || Step.Action == NotFound;
}
+
+ /// Looks through copy instructions and returns the actual
+ /// source register.
+ unsigned lookThroughCopyInstrs(unsigned Reg) {
+ unsigned TmpReg;
+ while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
+ if (MRI.getType(TmpReg).isValid())
+ Reg = TmpReg;
+ else
+ break;
+ }
+ return Reg;
+ }
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index d122e67b87b8..9b4ecf9284e3 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -32,6 +32,7 @@ namespace llvm {
class LegalizerInfo;
class Legalizer;
class MachineRegisterInfo;
+class GISelChangeObserver;
class LegalizerHelper {
public:
@@ -48,7 +49,10 @@ public:
UnableToLegalize,
};
- LegalizerHelper(MachineFunction &MF);
+ LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
+ MachineIRBuilder &B);
+ LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
+ GISelChangeObserver &Observer, MachineIRBuilder &B);
/// Replace \p MI by a sequence of legal instructions that can implement the
/// same operation. Note that this means \p MI may be deleted, so any iterator
@@ -87,7 +91,7 @@ public:
/// Expose MIRBuilder so clients can set their own RecordInsertInstruction
/// functions
- MachineIRBuilder MIRBuilder;
+ MachineIRBuilder &MIRBuilder;
/// Expose LegalizerInfo so the clients can re-use.
const LegalizerInfo &getLegalizerInfo() const { return LI; }
@@ -112,8 +116,12 @@ private:
void extractParts(unsigned Reg, LLT Ty, int NumParts,
SmallVectorImpl<unsigned> &VRegs);
+ LegalizeResult lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
+ /// To keep track of changes made by the LegalizerHelper.
+ GISelChangeObserver &Observer;
};
/// Helper function that creates the given libcall.
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index a8c26082f221..13776dd3e87d 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -39,6 +39,7 @@ class MachineInstr;
class MachineIRBuilder;
class MachineRegisterInfo;
class MCInstrInfo;
+class GISelChangeObserver;
namespace LegalizeActions {
enum LegalizeAction : std::uint8_t {
@@ -121,7 +122,7 @@ struct LegalityQuery {
ArrayRef<LLT> Types;
struct MemDesc {
- uint64_t Size;
+ uint64_t SizeInBits;
AtomicOrdering Ordering;
};
@@ -651,6 +652,20 @@ public:
return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
}
+ /// Widen the scalar to match the size of another.
+ LegalizeRuleSet &minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx) {
+ typeIdx(TypeIdx);
+ return widenScalarIf(
+ [=](const LegalityQuery &Query) {
+ return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
+ Query.Types[TypeIdx].getSizeInBits();
+ },
+ [=](const LegalityQuery &Query) {
+ return std::make_pair(TypeIdx,
+ Query.Types[LargeTypeIdx].getElementType());
+ });
+ }
+
/// Add more elements to the vector to reach the next power of two.
/// No effect if the type is not a vector or the element count is a power of
/// two.
@@ -693,6 +708,8 @@ public:
},
[=](const LegalityQuery &Query) {
LLT VecTy = Query.Types[TypeIdx];
+ if (MaxElements == 1)
+ return std::make_pair(TypeIdx, VecTy.getElementType());
return std::make_pair(
TypeIdx, LLT::vector(MaxElements, VecTy.getScalarSizeInBits()));
});
@@ -947,9 +964,9 @@ public:
bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
- virtual bool legalizeCustom(MachineInstr &MI,
- MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder) const;
+ virtual bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIRBuilder,
+ GISelChangeObserver &Observer) const;
private:
/// Determine what action should be taken to legalize the given generic
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index ac1673de5f3f..37de8f030410 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/Types.h"
#include "llvm/CodeGen/LowLevelType.h"
@@ -30,6 +31,7 @@ namespace llvm {
class MachineFunction;
class MachineInstr;
class TargetInstrInfo;
+class GISelChangeObserver;
/// Class which stores all the state required in a MachineIRBuilder.
/// Since MachineIRBuilders will only store state in this object, it allows
@@ -50,62 +52,177 @@ struct MachineIRBuilderState {
MachineBasicBlock::iterator II;
/// @}
- std::function<void(MachineInstr *)> InsertedInstr;
+ GISelChangeObserver *Observer;
+
+ GISelCSEInfo *CSEInfo;
};
-/// Helper class to build MachineInstr.
-/// It keeps internally the insertion point and debug location for all
-/// the new instructions we want to create.
-/// This information can be modify via the related setters.
-class MachineIRBuilderBase {
+class DstOp {
+ union {
+ LLT LLTTy;
+ unsigned Reg;
+ const TargetRegisterClass *RC;
+ };
- MachineIRBuilderState State;
- const TargetInstrInfo &getTII() {
- assert(State.TII && "TargetInstrInfo is not set");
- return *State.TII;
+public:
+ enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
+ DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
+ DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
+ DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
+
+ void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const {
+ switch (Ty) {
+ case DstType::Ty_Reg:
+ MIB.addDef(Reg);
+ break;
+ case DstType::Ty_LLT:
+ MIB.addDef(MRI.createGenericVirtualRegister(LLTTy));
+ break;
+ case DstType::Ty_RC:
+ MIB.addDef(MRI.createVirtualRegister(RC));
+ break;
+ }
}
- void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+ LLT getLLTTy(const MachineRegisterInfo &MRI) const {
+ switch (Ty) {
+ case DstType::Ty_RC:
+ return LLT{};
+ case DstType::Ty_LLT:
+ return LLTTy;
+ case DstType::Ty_Reg:
+ return MRI.getType(Reg);
+ }
+ llvm_unreachable("Unrecognised DstOp::DstType enum");
+ }
-protected:
- unsigned getDestFromArg(unsigned Reg) { return Reg; }
- unsigned getDestFromArg(LLT Ty) {
- return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+ unsigned getReg() const {
+ assert(Ty == DstType::Ty_Reg && "Not a register");
+ return Reg;
}
- unsigned getDestFromArg(const TargetRegisterClass *RC) {
- return getMF().getRegInfo().createVirtualRegister(RC);
+
+ const TargetRegisterClass *getRegClass() const {
+ switch (Ty) {
+ case DstType::Ty_RC:
+ return RC;
+ default:
+ llvm_unreachable("Not a RC Operand");
+ }
}
- void addUseFromArg(MachineInstrBuilder &MIB, unsigned Reg) {
- MIB.addUse(Reg);
+ DstType getDstOpKind() const { return Ty; }
+
+private:
+ DstType Ty;
+};
+
+class SrcOp {
+ union {
+ MachineInstrBuilder SrcMIB;
+ unsigned Reg;
+ CmpInst::Predicate Pred;
+ };
+
+public:
+ enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
+ SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
+ SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
+ SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
+
+ void addSrcToMIB(MachineInstrBuilder &MIB) const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ MIB.addPredicate(Pred);
+ break;
+ case SrcType::Ty_Reg:
+ MIB.addUse(Reg);
+ break;
+ case SrcType::Ty_MIB:
+ MIB.addUse(SrcMIB->getOperand(0).getReg());
+ break;
+ }
}
- void addUseFromArg(MachineInstrBuilder &MIB, const MachineInstrBuilder &UseMIB) {
- MIB.addUse(UseMIB->getOperand(0).getReg());
+ LLT getLLTTy(const MachineRegisterInfo &MRI) const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ llvm_unreachable("Not a register operand");
+ case SrcType::Ty_Reg:
+ return MRI.getType(Reg);
+ case SrcType::Ty_MIB:
+ return MRI.getType(SrcMIB->getOperand(0).getReg());
+ }
+ llvm_unreachable("Unrecognised SrcOp::SrcType enum");
}
- void addUsesFromArgs(MachineInstrBuilder &MIB) { }
- template<typename UseArgTy, typename ... UseArgsTy>
- void addUsesFromArgs(MachineInstrBuilder &MIB, UseArgTy &&Arg1, UseArgsTy &&... Args) {
- addUseFromArg(MIB, Arg1);
- addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+ unsigned getReg() const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ llvm_unreachable("Not a register operand");
+ case SrcType::Ty_Reg:
+ return Reg;
+ case SrcType::Ty_MIB:
+ return SrcMIB->getOperand(0).getReg();
+ }
+ llvm_unreachable("Unrecognised SrcOp::SrcType enum");
}
- unsigned getRegFromArg(unsigned Reg) { return Reg; }
- unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
- return MIB->getOperand(0).getReg();
+
+ CmpInst::Predicate getPredicate() const {
+ switch (Ty) {
+ case SrcType::Ty_Predicate:
+ return Pred;
+ default:
+ llvm_unreachable("Not a register operand");
+ }
}
- void validateBinaryOp(unsigned Res, unsigned Op0, unsigned Op1);
+ SrcType getSrcOpKind() const { return Ty; }
+
+private:
+ SrcType Ty;
+};
+
+class FlagsOp {
+ Optional<unsigned> Flags;
+
+public:
+ explicit FlagsOp(unsigned F) : Flags(F) {}
+ FlagsOp() : Flags(None) {}
+ Optional<unsigned> getFlags() const { return Flags; }
+};
+/// Helper class to build MachineInstr.
+/// It keeps internally the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modify via the related setters.
+class MachineIRBuilder {
+
+ MachineIRBuilderState State;
+
+protected:
+ void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend);
+
+ void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
+
+ void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty,
+ const LLT &Op1Ty);
+ void recordInsertion(MachineInstr *MI) const;
public:
/// Some constructors for easy use.
- MachineIRBuilderBase() = default;
- MachineIRBuilderBase(MachineFunction &MF) { setMF(MF); }
- MachineIRBuilderBase(MachineInstr &MI) : MachineIRBuilderBase(*MI.getMF()) {
+ MachineIRBuilder() = default;
+ MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+ MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
setInstr(MI);
}
- MachineIRBuilderBase(const MachineIRBuilderState &BState) : State(BState) {}
+ virtual ~MachineIRBuilder() = default;
+
+ MachineIRBuilder(const MachineIRBuilderState &BState) : State(BState) {}
+
+ const TargetInstrInfo &getTII() {
+ assert(State.TII && "TargetInstrInfo is not set");
+ return *State.TII;
+ }
/// Getter for the function we currently build.
MachineFunction &getMF() {
@@ -118,16 +235,25 @@ public:
/// Getter for MRI
MachineRegisterInfo *getMRI() { return State.MRI; }
+ const MachineRegisterInfo *getMRI() const { return State.MRI; }
/// Getter for the State
MachineIRBuilderState &getState() { return State; }
/// Getter for the basic block we currently build.
- MachineBasicBlock &getMBB() {
+ const MachineBasicBlock &getMBB() const {
assert(State.MBB && "MachineBasicBlock is not set");
return *State.MBB;
}
+ MachineBasicBlock &getMBB() {
+ return const_cast<MachineBasicBlock &>(
+ const_cast<const MachineIRBuilder *>(this)->getMBB());
+ }
+
+ GISelCSEInfo *getCSEInfo() { return State.CSEInfo; }
+ const GISelCSEInfo *getCSEInfo() const { return State.CSEInfo; }
+
/// Current insertion point for new instructions.
MachineBasicBlock::iterator getInsertPt() { return State.II; }
@@ -137,10 +263,12 @@ public:
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
/// @}
+ void setCSEInfo(GISelCSEInfo *Info);
+
/// \name Setters for the insertion point.
/// @{
/// Set the MachineFunction where to build instructions.
- void setMF(MachineFunction &);
+ void setMF(MachineFunction &MF);
/// Set the insertion point to the end of \p MBB.
/// \pre \p MBB must be contained by getMF().
@@ -151,12 +279,8 @@ public:
void setInstr(MachineInstr &MI);
/// @}
- /// \name Control where instructions we create are recorded (typically for
- /// visiting again later during legalization).
- /// @{
- void recordInsertion(MachineInstr *InsertedInstr) const;
- void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
- void stopRecordingInsertions();
+ void setChangeObserver(GISelChangeObserver &Observer);
+ void stopObservingChanges();
/// @}
/// Set the debug location to \p DL for all the next build instructions.
@@ -208,6 +332,10 @@ public:
const MDNode *Variable,
const MDNode *Expr);
+ /// Build and insert a DBG_LABEL instructions specifying that \p Label is
+ /// given. Convert "llvm.dbg.label Label" to "DBG_LABEL Label".
+ MachineInstrBuilder buildDbgLabel(const MDNode *Label);
+
/// Build and insert \p Res = G_FRAME_INDEX \p Idx
///
/// G_FRAME_INDEX materializes the address of an alloca value or other
@@ -296,9 +424,9 @@ public:
/// registers with the same scalar type (typically s1)
///
/// \return The newly created instruction.
- MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
- unsigned Op1, unsigned CarryIn);
-
+ MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut,
+ const SrcOp &Op0, const SrcOp &Op1,
+ const SrcOp &CarryIn);
/// Build and insert \p Res = G_ANYEXT \p Op0
///
@@ -314,11 +442,7 @@ public:
///
/// \return The newly created instruction.
- MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
- template <typename DstType, typename ArgType>
- MachineInstrBuilder buildAnyExt(DstType &&Res, ArgType &&Arg) {
- return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
- }
+ MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = G_SEXT \p Op
///
@@ -332,11 +456,7 @@ public:
/// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
- template <typename DstType, typename ArgType>
- MachineInstrBuilder buildSExt(DstType &&Res, ArgType &&Arg) {
- return buildSExt(getDestFromArg(Res), getRegFromArg(Arg));
- }
- MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = G_ZEXT \p Op
///
@@ -350,11 +470,7 @@ public:
/// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
- template <typename DstType, typename ArgType>
- MachineInstrBuilder buildZExt(DstType &&Res, ArgType &&Arg) {
- return buildZExt(getDestFromArg(Res), getRegFromArg(Arg));
- }
- MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
@@ -364,11 +480,7 @@ public:
/// \pre \p Op must be a generic virtual register with scalar or vector type.
///
/// \return The newly created instruction.
- template <typename DstTy, typename UseArgTy>
- MachineInstrBuilder buildSExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
- return buildSExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
- }
- MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
@@ -378,11 +490,7 @@ public:
/// \pre \p Op must be a generic virtual register with scalar or vector type.
///
/// \return The newly created instruction.
- template <typename DstTy, typename UseArgTy>
- MachineInstrBuilder buildZExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
- return buildZExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
- }
- MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op);
// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
@@ -392,11 +500,7 @@ public:
/// \pre \p Op must be a generic virtual register with scalar or vector type.
///
/// \return The newly created instruction.
- template <typename DstTy, typename UseArgTy>
- MachineInstrBuilder buildAnyExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
- return buildAnyExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
- }
- MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
/// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
@@ -407,15 +511,11 @@ public:
/// \pre \p Op must be a generic virtual register with scalar or vector type.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, unsigned Res,
- unsigned Op);
+ MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res,
+ const SrcOp &Op);
/// Build and insert an appropriate cast between two registers of equal size.
- template <typename DstType, typename ArgType>
- MachineInstrBuilder buildCast(DstType &&Res, ArgType &&Arg) {
- return buildCast(getDestFromArg(Res), getRegFromArg(Arg));
- }
- MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
+ MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src);
/// Build and insert G_BR \p Dest
///
@@ -460,7 +560,8 @@ public:
/// type.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val);
+ virtual MachineInstrBuilder buildConstant(const DstOp &Res,
+ const ConstantInt &Val);
/// Build and insert \p Res = G_CONSTANT \p Val
///
@@ -470,12 +571,8 @@ public:
/// \pre \p Res must be a generic virtual register with scalar type.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+ MachineInstrBuilder buildConstant(const DstOp &Res, int64_t Val);
- template <typename DstType>
- MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
- return buildConstant(getDestFromArg(Res), Val);
- }
/// Build and insert \p Res = G_FCONSTANT \p Val
///
/// G_FCONSTANT is a floating-point constant with the specified size and
@@ -485,17 +582,10 @@ public:
/// \pre \p Res must be a generic virtual register with scalar type.
///
/// \return The newly created instruction.
- template <typename DstType>
- MachineInstrBuilder buildFConstant(DstType &&Res, const ConstantFP &Val) {
- return buildFConstant(getDestFromArg(Res), Val);
- }
- MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
+ virtual MachineInstrBuilder buildFConstant(const DstOp &Res,
+ const ConstantFP &Val);
- template <typename DstType>
- MachineInstrBuilder buildFConstant(DstType &&Res, double Val) {
- return buildFConstant(getDestFromArg(Res), Val);
- }
- MachineInstrBuilder buildFConstant(unsigned Res, double Val);
+ MachineInstrBuilder buildFConstant(const DstOp &Res, double Val);
/// Build and insert \p Res = COPY Op
///
@@ -504,11 +594,7 @@ public:
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
- template <typename DstType, typename SrcType>
- MachineInstrBuilder buildCopy(DstType &&Res, SrcType &&Src) {
- return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
- }
+ MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op);
/// Build and insert `Res = G_LOAD Addr, MMO`.
///
@@ -555,10 +641,7 @@ public:
MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
/// Build and insert \p Res = IMPLICIT_DEF.
- template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
- return buildUndef(getDestFromArg(Res));
- }
- MachineInstrBuilder buildUndef(unsigned Res);
+ MachineInstrBuilder buildUndef(const DstOp &Res);
/// Build and insert instructions to put \p Ops together at the specified p
/// Indices to form a larger register.
@@ -587,7 +670,7 @@ public:
/// \pre The type of all \p Ops registers must be identical.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
+ MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<unsigned> Ops);
/// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
@@ -599,7 +682,50 @@ public:
/// \pre The type of all \p Res registers must be identical.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, unsigned Op);
+ MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
+ MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, const SrcOp &Op);
+
+ /// Build and insert \p Res = G_BUILD_VECTOR \p Op0, ...
+ ///
+ /// G_BUILD_VECTOR creates a vector value from multiple scalar registers.
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The entire register \p Res (and no more) must be covered by the
+ /// input scalar registers.
+ /// \pre The type of all \p Ops registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBuildVector(const DstOp &Res,
+ ArrayRef<unsigned> Ops);
+
+ /// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ...
+ ///
+ /// G_BUILD_VECTOR_TRUNC creates a vector value from multiple scalar registers
+ /// which have types larger than the destination vector element type, and
+ /// truncates the values to fit.
+ ///
+ /// If the operands given are already the same size as the vector elt type,
+ /// then this method will instead create a G_BUILD_VECTOR instruction.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The type of all \p Ops registers must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
+ ArrayRef<unsigned> Ops);
+
+ /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
+ ///
+ /// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
+ /// vectors.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The entire register \p Res (and no more) must be covered by the input
+ /// registers.
+ /// \pre The type of all source operands must be identical.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildConcatVectors(const DstOp &Res,
+ ArrayRef<unsigned> Ops);
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
unsigned Op, unsigned Index);
@@ -627,11 +753,7 @@ public:
/// \pre \p Res must be smaller than \p Op
///
/// \return The newly created instruction.
- template <typename DstType, typename SrcType>
- MachineInstrBuilder buildFPTrunc(DstType &&Res, SrcType &&Src) {
- return buildFPTrunc(getDestFromArg(Res), getRegFromArg(Src));
- }
- MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
+ MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op);
/// Build and insert \p Res = G_TRUNC \p Op
///
@@ -644,11 +766,7 @@ public:
/// \pre \p Res must be smaller than \p Op
///
/// \return The newly created instruction.
- MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
- template <typename DstType, typename SrcType>
- MachineInstrBuilder buildTrunc(DstType &&Res, SrcType &&Src) {
- return buildTrunc(getDestFromArg(Res), getRegFromArg(Src));
- }
+ MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op);
/// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
///
@@ -662,8 +780,8 @@ public:
/// \pre \p Pred must be an integer predicate.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
- unsigned Res, unsigned Op0, unsigned Op1);
+ MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res,
+ const SrcOp &Op0, const SrcOp &Op1);
/// Build and insert a \p Res = G_FCMP \p Pred\p Op0, \p Op1
///
@@ -677,8 +795,8 @@ public:
/// \pre \p Pred must be a floating-point predicate.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
- unsigned Res, unsigned Op0, unsigned Op1);
+ MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
+ const SrcOp &Op0, const SrcOp &Op1);
/// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
///
@@ -690,8 +808,8 @@ public:
/// elements as the other parameters.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
- unsigned Op0, unsigned Op1);
+ MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
+ const SrcOp &Op0, const SrcOp &Op1);
/// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
/// \p Elt, \p Idx
@@ -703,8 +821,10 @@ public:
/// with scalar type.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
- unsigned Elt, unsigned Idx);
+ MachineInstrBuilder buildInsertVectorElement(const DstOp &Res,
+ const SrcOp &Val,
+ const SrcOp &Elt,
+ const SrcOp &Idx);
/// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
///
@@ -714,8 +834,9 @@ public:
/// \pre \p Idx must be a generic virtual register with scalar type.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
- unsigned Idx);
+ MachineInstrBuilder buildExtractVectorElement(const DstOp &Res,
+ const SrcOp &Val,
+ const SrcOp &Idx);
/// Build and insert `OldValRes<def>, SuccessRes<def> =
/// G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO`.
@@ -952,19 +1073,7 @@ public:
///
/// \return The newly created instruction.
MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA);
-};
-
-/// A CRTP class that contains methods for building instructions that can
-/// be constant folded. MachineIRBuilders that want to inherit from this will
-/// need to implement buildBinaryOp (for constant folding binary ops).
-/// Alternatively, they can implement buildInstr(Opc, Dst, Uses...) to perform
-/// additional folding for Opc.
-template <typename Base>
-class FoldableInstructionsBuilder : public MachineIRBuilderBase {
- Base &base() { return static_cast<Base &>(*this); }
-public:
- using MachineIRBuilderBase::MachineIRBuilderBase;
/// Build and insert \p Res = G_ADD \p Op0, \p Op1
///
/// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
@@ -976,13 +1085,10 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildAdd(unsigned Dst, unsigned Src0, unsigned Src1) {
- return base().buildBinaryOp(TargetOpcode::G_ADD, Dst, Src0, Src1);
- }
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = base().getDestFromArg(Ty);
- return base().buildAdd(Res, (base().getRegFromArg(UseArgs))...);
+ MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_SUB \p Op0, \p Op1
@@ -996,13 +1102,10 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSub(unsigned Dst, unsigned Src0, unsigned Src1) {
- return base().buildBinaryOp(TargetOpcode::G_SUB, Dst, Src0, Src1);
- }
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = base().getDestFromArg(Ty);
- return base().buildSub(Res, (base().getRegFromArg(UseArgs))...);
+ MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_MUL \p Op0, \p Op1
@@ -1015,13 +1118,10 @@ public:
/// with the same (scalar or vector) type).
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildMul(unsigned Dst, unsigned Src0, unsigned Src1) {
- return base().buildBinaryOp(TargetOpcode::G_MUL, Dst, Src0, Src1);
- }
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = base().getDestFromArg(Ty);
- return base().buildMul(Res, (base().getRegFromArg(UseArgs))...);
+ MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_AND \p Op0, \p Op1
@@ -1035,13 +1135,9 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildAnd(unsigned Dst, unsigned Src0, unsigned Src1) {
- return base().buildBinaryOp(TargetOpcode::G_AND, Dst, Src0, Src1);
- }
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildAnd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = base().getDestFromArg(Ty);
- return base().buildAnd(Res, (base().getRegFromArg(UseArgs))...);
+ MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_AND, {Dst}, {Src0, Src1});
}
/// Build and insert \p Res = G_OR \p Op0, \p Op1
@@ -1054,39 +1150,14 @@ public:
/// with the same (scalar or vector) type).
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildOr(unsigned Dst, unsigned Src0, unsigned Src1) {
- return base().buildBinaryOp(TargetOpcode::G_OR, Dst, Src0, Src1);
- }
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildOr(DstTy &&Ty, UseArgsTy &&... UseArgs) {
- unsigned Res = base().getDestFromArg(Ty);
- return base().buildOr(Res, (base().getRegFromArg(UseArgs))...);
+ MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1) {
+ return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1});
}
-};
-class MachineIRBuilder : public FoldableInstructionsBuilder<MachineIRBuilder> {
-public:
- using FoldableInstructionsBuilder<
- MachineIRBuilder>::FoldableInstructionsBuilder;
- MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
- unsigned Src0, unsigned Src1) {
- validateBinaryOp(Dst, Src0, Src1);
- return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
- }
- using FoldableInstructionsBuilder<MachineIRBuilder>::buildInstr;
- /// DAG like Generic method for building arbitrary instructions as above.
- /// \Opc opcode for the instruction.
- /// \Ty Either LLT/TargetRegisterClass/unsigned types for Dst
- /// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
- /// Uses of type MachineInstrBuilder will perform
- /// getOperand(0).getReg() to convert to register.
- template <typename DstTy, typename... UseArgsTy>
- MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
- UseArgsTy &&... Args) {
- auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
- addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
- return MIB;
- }
+ virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flags = None);
};
} // End namespace llvm.
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 82fd7eddb68a..c33b32b2db40 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -103,8 +103,8 @@ public:
/// Currently the TableGen-like file would look like:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add*/ {0, 32, GPR},
- /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+ /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first vec elt.
+ /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
/// /*<2x32-bit> vadd {0, 64, VPR}
/// }; // PartialMapping duplicated.
///
@@ -118,14 +118,15 @@ public:
/// With the array of pointer, we would have:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add*/ {0, 32, GPR},
+ /// /*32-bit add lower */ {0, 32, GPR},
+ /// /*32-bit add upper */ {32, 32, GPR},
/// /*<2x32-bit> vadd {0, 64, VPR}
/// }; // No more duplication.
///
/// BreakDowns[] = {
/// /*AddBreakDown*/ &PartialMapping[0],
- /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
- /// /*VAddBreakDown*/ &PartialMapping[1]
+ /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
+ /// /*VAddBreakDown*/ &PartialMapping[2]
/// }; // Addresses of PartialMapping duplicated (smaller).
///
/// ValueMapping[] {
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 51e3a2732972..82b791d35b2b 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -108,5 +108,8 @@ APFloat getAPFloatFromSize(double Val, unsigned Size);
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
+Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+ const unsigned Op2,
+ const MachineRegisterInfo &MRI);
} // End namespace llvm.
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 80bd796d5374..9c918ae1104f 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -70,7 +70,7 @@ namespace ISD {
/// of the frame or return address to return. An index of zero corresponds
/// to the current function's frame or return address, an index of one to
/// the parent's frame or return address, and so on.
- FRAMEADDR, RETURNADDR, ADDROFRETURNADDR,
+ FRAMEADDR, RETURNADDR, ADDROFRETURNADDR, SPONENTRY,
/// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
/// Materializes the offset from the local object pointer of another
@@ -256,6 +256,29 @@ namespace ISD {
/// Same for multiplication.
SMULO, UMULO,
+ /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
+ /// integers with the same bit width (W). If the true value of LHS + RHS
+ /// exceeds the largest value that can be represented by W bits, the
+ /// resulting value is this maximum value. Otherwise, if this value is less
+ /// than the smallest value that can be represented by W bits, the
+ /// resulting value is this minimum value.
+ SADDSAT, UADDSAT,
+
+ /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
+ /// integers with the same bit width (W). If the true value of LHS - RHS
+ /// exceeds the largest value that can be represented by W bits, the
+ /// resulting value is this maximum value. Otherwise, if this value is less
+ /// than the smallest value that can be represented by W bits, the
+ /// resulting value is this minimum value.
+ SSUBSAT, USUBSAT,
+
+ /// RESULT = SMULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on
+ /// 2 integers with the same width and scale. SCALE represents the scale of
+ /// both operands as fixed point numbers. This SCALE parameter must be a
+ /// constant integer. A scale of zero is effectively performing
+ /// multiplication on 2 integers.
+ SMULFIX,
+
/// Simple binary floating point operators.
FADD, FSUB, FMUL, FDIV, FREM,
@@ -272,7 +295,8 @@ namespace ISD {
/// They are used to limit optimizations while the DAG is being optimized.
STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
- STRICT_FRINT, STRICT_FNEARBYINT,
+ STRICT_FRINT, STRICT_FNEARBYINT, STRICT_FMAXNUM, STRICT_FMINNUM,
+ STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND, STRICT_FTRUNC,
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,
@@ -377,9 +401,13 @@ namespace ISD {
/// When the 1st operand is a vector, the shift amount must be in the same
/// type. (TLI.getShiftAmountTy() will return the same type when the input
/// type is a vector.)
- /// For rotates, the shift amount is treated as an unsigned amount modulo
- /// the element size of the first operand.
- SHL, SRA, SRL, ROTL, ROTR,
+ /// For rotates and funnel shifts, the shift amount is treated as an unsigned
+ /// amount modulo the element size of the first operand.
+ ///
+ /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
+ /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
+ /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
+ SHL, SRA, SRL, ROTL, ROTR, FSHL, FSHR,
/// Byte Swap and Counting operators.
BSWAP, CTTZ, CTLZ, CTPOP, BITREVERSE,
@@ -461,31 +489,33 @@ namespace ISD {
/// in-register any-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
- /// operand type and the result type match. Each of the low operand
- /// elements is any-extended into the corresponding, wider result
- /// elements with the high bits becoming undef.
+ /// operand type is less than or equal to the size of the result type. Each
+ /// of the low operand elements is any-extended into the corresponding,
+ /// wider result elements with the high bits becoming undef.
+ /// NOTE: The type legalizer prefers to make the operand and result size
+ /// the same to allow expansion to shuffle vector during op legalization.
ANY_EXTEND_VECTOR_INREG,
/// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
/// in-register sign-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
- /// operand type and the result type match. Each of the low operand
- /// elements is sign-extended into the corresponding, wider result
- /// elements.
- // FIXME: The SIGN_EXTEND_INREG node isn't specifically limited to
- // scalars, but it also doesn't handle vectors well. Either it should be
- // restricted to scalars or this node (and its handling) should be merged
- // into it.
+ /// operand type is less than or equal to the size of the result type. Each
+ /// of the low operand elements is sign-extended into the corresponding,
+ /// wider result elements.
+ /// NOTE: The type legalizer prefers to make the operand and result size
+ /// the same to allow expansion to shuffle vector during op legalization.
SIGN_EXTEND_VECTOR_INREG,
/// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
/// in-register zero-extension of the low lanes of an integer vector. The
/// result type must have fewer elements than the operand type, and those
/// elements must be larger integer types such that the total size of the
- /// operand type and the result type match. Each of the low operand
- /// elements is zero-extended into the corresponding, wider result
- /// elements.
+ /// operand type is less than or equal to the size of the result type. Each
+ /// of the low operand elements is zero-extended into the corresponding,
+ /// wider result elements.
+ /// NOTE: The type legalizer prefers to make the operand and result size
+ /// the same to allow expansion to shuffle vector during op legalization.
ZERO_EXTEND_VECTOR_INREG,
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
@@ -550,22 +580,29 @@ namespace ISD {
/// is often a storage-only type but has native conversions.
FP16_TO_FP, FP_TO_FP16,
- /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
- /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
- /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
- /// floating point operations. These are inspired by libm.
- FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ /// Perform various unary floating-point operations inspired by libm.
+ FNEG, FABS, FSQRT, FCBRT, FSIN, FCOS, FPOWI, FPOW,
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
/// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
/// values.
- /// In the case where a single input is NaN, the non-NaN input is returned.
+ //
+ /// In the case where a single input is a NaN (either signaling or quiet),
+ /// the non-NaN input is returned.
///
/// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
FMINNUM, FMAXNUM,
- /// FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that
- /// when a single input is NaN, NaN is returned.
- FMINNAN, FMAXNAN,
+
+ /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
+ /// two values, following the IEEE-754 2008 definition. This differs from
+ /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
+ /// signaling NaN, returns a quiet NaN.
+ FMINNUM_IEEE, FMAXNUM_IEEE,
+
+ /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
+ /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
+ /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
+ FMINIMUM, FMAXIMUM,
/// FSINCOS - Compute both fsin and fcos as a single operation.
FSINCOS,
@@ -786,11 +823,20 @@ namespace ISD {
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
// masked-off lanes.
+ //
+ // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
+ // OutChain = MSTORE(Value, BasePtr, Mask)
MLOAD, MSTORE,
// Masked gather and scatter - load and store operations for a vector of
// random addresses with additional mask operand that prevents memory
// accesses to the masked-off lanes.
+ //
+ // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
+ // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
+ //
+ // The Index operand can have more vector elements than the other operands
+ // due to type legalization. The extra elements are ignored.
MGATHER, MSCATTER,
/// This corresponds to the llvm.lifetime.* intrinsics. The first operand
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
index c3046da90b8d..38fcb37b1e69 100644
--- a/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -15,7 +15,7 @@
#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
-#include "llvm/CodeGen/GCs.h"
+#include "llvm/CodeGen/BuiltinGCs.h"
#include <cstdlib>
namespace {
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
index fee131e4a3c6..18c13ca8f598 100644
--- a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -15,7 +15,7 @@
#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
-#include "llvm/CodeGen/GCs.h"
+#include "llvm/CodeGen/BuiltinGCs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetMachine.h"
@@ -36,11 +36,7 @@ namespace {
(void) llvm::createGreedyRegisterAllocator();
(void) llvm::createDefaultPBQPRegisterAllocator();
- llvm::linkCoreCLRGC();
- llvm::linkOcamlGC();
- llvm::linkErlangGC();
- llvm::linkShadowStackGC();
- llvm::linkStatepointExampleGC();
+ llvm::linkAllBuiltinGCs();
(void) llvm::createBURRListDAGScheduler(nullptr,
llvm::CodeGenOpt::Default);
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervals.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervals.h
index 291a07a712cb..16ab1dc475c4 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervals.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervals.h
@@ -198,10 +198,10 @@ class VirtRegMap;
void pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints);
- /// This function should not be used. Its intend is to tell you that
- /// you are doing something wrong if you call pruveValue directly on a
+ /// This function should not be used. Its intent is to tell you that you are
+ /// doing something wrong if you call pruneValue directly on a
/// LiveInterval. Indeed, you are supposed to call pruneValue on the main
- /// LiveRange and all the LiveRange of the subranges if any.
+ /// LiveRange and all the LiveRanges of the subranges if any.
LLVM_ATTRIBUTE_UNUSED void pruneValue(LiveInterval &, SlotIndex,
SmallVectorImpl<SlotIndex> *) {
llvm_unreachable(
diff --git a/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h b/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h
index 301a45066b4c..7312902e21b7 100644
--- a/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h
+++ b/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h
@@ -48,7 +48,8 @@ class raw_ostream;
/// when walking backward/forward through a basic block.
class LivePhysRegs {
const TargetRegisterInfo *TRI = nullptr;
- SparseSet<unsigned> LiveRegs;
+ using RegisterSet = SparseSet<MCPhysReg, identity<MCPhysReg>>;
+ RegisterSet LiveRegs;
public:
/// Constructs an unitialized set. init() needs to be called to initialize it.
@@ -76,7 +77,7 @@ public:
bool empty() const { return LiveRegs.empty(); }
/// Adds a physical register and all its sub-registers to the set.
- void addReg(unsigned Reg) {
+ void addReg(MCPhysReg Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
@@ -86,7 +87,7 @@ public:
/// Removes a physical register, all its sub-registers, and all its
/// super-registers from the set.
- void removeReg(unsigned Reg) {
+ void removeReg(MCPhysReg Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
for (MCRegAliasIterator R(Reg, TRI, true); R.isValid(); ++R)
@@ -95,7 +96,7 @@ public:
/// Removes physical registers clobbered by the regmask operand \p MO.
void removeRegsInMask(const MachineOperand &MO,
- SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
+ SmallVectorImpl<std::pair<MCPhysReg, const MachineOperand*>> *Clobbers =
nullptr);
/// Returns true if register \p Reg is contained in the set. This also
@@ -103,10 +104,10 @@ public:
/// addReg() always adds all sub-registers to the set as well.
/// Note: Returns false if just some sub registers are live, use available()
/// when searching a free register.
- bool contains(unsigned Reg) const { return LiveRegs.count(Reg); }
+ bool contains(MCPhysReg Reg) const { return LiveRegs.count(Reg); }
/// Returns true if register \p Reg and no aliasing register is in the set.
- bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
+ bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const;
/// Remove defined registers and regmask kills from the set.
void removeDefs(const MachineInstr &MI);
@@ -126,7 +127,7 @@ public:
/// defined or clobbered by a regmask. The operand will identify whether this
/// is a regmask or register operand.
void stepForward(const MachineInstr &MI,
- SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
+ SmallVectorImpl<std::pair<MCPhysReg, const MachineOperand*>> &Clobbers);
/// Adds all live-in registers of basic block \p MBB.
/// Live in registers are the registers in the blocks live-in list and the
@@ -143,7 +144,7 @@ public:
/// registers.
void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
- using const_iterator = SparseSet<unsigned>::const_iterator;
+ using const_iterator = RegisterSet::const_iterator;
const_iterator begin() const { return LiveRegs.begin(); }
const_iterator end() const { return LiveRegs.end(); }
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveRegUnits.h b/contrib/llvm/include/llvm/CodeGen/LiveRegUnits.h
index 249545906e01..5e9dd8b3cdf6 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveRegUnits.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveRegUnits.h
@@ -85,14 +85,14 @@ public:
bool empty() const { return Units.none(); }
/// Adds register units covered by physical register \p Reg.
- void addReg(unsigned Reg) {
+ void addReg(MCPhysReg Reg) {
for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
Units.set(*Unit);
}
/// Adds register units covered by physical register \p Reg that are
/// part of the lanemask \p Mask.
- void addRegMasked(unsigned Reg, LaneBitmask Mask) {
+ void addRegMasked(MCPhysReg Reg, LaneBitmask Mask) {
for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
LaneBitmask UnitMask = (*Unit).second;
if (UnitMask.none() || (UnitMask & Mask).any())
@@ -101,7 +101,7 @@ public:
}
/// Removes all register units covered by physical register \p Reg.
- void removeReg(unsigned Reg) {
+ void removeReg(MCPhysReg Reg) {
for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
Units.reset(*Unit);
}
@@ -115,7 +115,7 @@ public:
void addRegsInMask(const uint32_t *RegMask);
/// Returns true if no part of physical register \p Reg is live.
- bool available(unsigned Reg) const {
+ bool available(MCPhysReg Reg) const {
for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
if (Units.test(*Unit))
return false;
diff --git a/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index 7f46406c4789..98ac81915dc0 100644
--- a/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -425,6 +425,7 @@ struct MachineFrameInfo {
StringValue StackProtector;
// TODO: Serialize FunctionContextIdx
unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
+ unsigned CVBytesOfCalleeSavedRegisters = 0;
bool HasOpaqueSPAdjustment = false;
bool HasVAStart = false;
bool HasMustTailInVarArgFunc = false;
@@ -443,6 +444,8 @@ struct MachineFrameInfo {
AdjustsStack == Other.AdjustsStack && HasCalls == Other.HasCalls &&
StackProtector == Other.StackProtector &&
MaxCallFrameSize == Other.MaxCallFrameSize &&
+ CVBytesOfCalleeSavedRegisters ==
+ Other.CVBytesOfCalleeSavedRegisters &&
HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
HasVAStart == Other.HasVAStart &&
HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
@@ -465,6 +468,8 @@ template <> struct MappingTraits<MachineFrameInfo> {
YamlIO.mapOptional("stackProtector", MFI.StackProtector,
StringValue()); // Don't print it out when it's empty.
YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, (unsigned)~0);
+ YamlIO.mapOptional("cvBytesOfCalleeSavedRegisters",
+ MFI.CVBytesOfCalleeSavedRegisters, 0U);
YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment,
false);
YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
@@ -489,6 +494,7 @@ struct MachineFunction {
bool FailedISel = false;
// Register information
bool TracksRegLiveness = false;
+ bool HasWinCFI = false;
std::vector<VirtualRegisterDefinition> VirtualRegisters;
std::vector<MachineFunctionLiveIn> LiveIns;
Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
@@ -512,6 +518,7 @@ template <> struct MappingTraits<MachineFunction> {
YamlIO.mapOptional("selected", MF.Selected, false);
YamlIO.mapOptional("failedISel", MF.FailedISel, false);
YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
+ YamlIO.mapOptional("hasWinCFI", MF.HasWinCFI, false);
YamlIO.mapOptional("registers", MF.VirtualRegisters,
std::vector<VirtualRegisterDefinition>());
YamlIO.mapOptional("liveins", MF.LiveIns,
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index ace33efd8713..ec2f270fcb3f 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -569,6 +569,12 @@ public:
return !empty() && back().isReturn();
}
+ /// Convenience function that returns true if the bock ends in a EH scope
+ /// return instruction.
+ bool isEHScopeReturnBlock() const {
+ return !empty() && back().isEHScopeReturn();
+ }
+
/// Split the critical edge from this block to the given successor block, and
/// return the newly created block, or null if splitting is not possible.
///
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 2d6081f3577d..c2706a21a177 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -28,9 +28,14 @@ class AllocaInst;
/// The CalleeSavedInfo class tracks the information need to locate where a
/// callee saved register is in the current frame.
+/// Callee saved reg can also be saved to a different register rather than
+/// on the stack by setting DstReg instead of FrameIdx.
class CalleeSavedInfo {
unsigned Reg;
- int FrameIdx;
+ union {
+ int FrameIdx;
+ unsigned DstReg;
+ };
/// Flag indicating whether the register is actually restored in the epilog.
/// In most cases, if a register is saved, it is also restored. There are
/// some situations, though, when this is not the case. For example, the
@@ -44,17 +49,29 @@ class CalleeSavedInfo {
/// by implicit uses on the return instructions, however, the required
/// changes in the ARM backend would be quite extensive.
bool Restored;
+ /// Flag indicating whether the register is spilled to stack or another
+ /// register.
+ bool SpilledToReg;
public:
explicit CalleeSavedInfo(unsigned R, int FI = 0)
- : Reg(R), FrameIdx(FI), Restored(true) {}
+ : Reg(R), FrameIdx(FI), Restored(true), SpilledToReg(false) {}
// Accessors.
unsigned getReg() const { return Reg; }
int getFrameIdx() const { return FrameIdx; }
- void setFrameIdx(int FI) { FrameIdx = FI; }
+ unsigned getDstReg() const { return DstReg; }
+ void setFrameIdx(int FI) {
+ FrameIdx = FI;
+ SpilledToReg = false;
+ }
+ void setDstReg(unsigned SpillReg) {
+ DstReg = SpillReg;
+ SpilledToReg = true;
+ }
bool isRestored() const { return Restored; }
void setRestored(bool R) { Restored = R; }
+ bool isSpilledToReg() const { return SpilledToReg; }
};
/// The MachineFrameInfo class represents an abstract stack frame until
@@ -266,10 +283,14 @@ private:
/// It is only valid during and after prolog/epilog code insertion.
unsigned MaxCallFrameSize = ~0u;
+ /// The number of bytes of callee saved registers that the target wants to
+ /// report for the current function in the CodeView S_FRAMEPROC record.
+ unsigned CVBytesOfCalleeSavedRegisters = 0;
+
/// The prolog/epilog code inserter fills in this vector with each
- /// callee saved register saved in the frame. Beyond its use by the prolog/
- /// epilog code inserter, this data used for debug info and exception
- /// handling.
+ /// callee saved register saved in either the frame or a different
+ /// register. Beyond its use by the prolog/ epilog code inserter,
+ /// this data is used for debug info and exception handling.
std::vector<CalleeSavedInfo> CSInfo;
/// Has CSInfo been set yet?
@@ -603,6 +624,15 @@ public:
}
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+ /// Returns how many bytes of callee-saved registers the target pushed in the
+ /// prologue. Only used for debug info.
+ unsigned getCVBytesOfCalleeSavedRegisters() const {
+ return CVBytesOfCalleeSavedRegisters;
+ }
+ void setCVBytesOfCalleeSavedRegisters(unsigned S) {
+ CVBytesOfCalleeSavedRegisters = S;
+ }
+
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are not pointed to by LLVM IR
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
index e8a4d529faac..25edf5bcce51 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -58,6 +58,7 @@ class DILocalVariable;
class DILocation;
class Function;
class GlobalValue;
+class LLVMTargetMachine;
class MachineConstantPool;
class MachineFrameInfo;
class MachineFunction;
@@ -70,7 +71,6 @@ class Pass;
class PseudoSourceValueManager;
class raw_ostream;
class SlotIndexes;
-class TargetMachine;
class TargetRegisterClass;
class TargetSubtargetInfo;
struct WasmEHFuncInfo;
@@ -225,7 +225,7 @@ struct LandingPadInfo {
class MachineFunction {
const Function &F;
- const TargetMachine &Target;
+ const LLVMTargetMachine &Target;
const TargetSubtargetInfo *STI;
MCContext &Ctx;
MachineModuleInfo &MMI;
@@ -294,7 +294,7 @@ class MachineFunction {
bool HasInlineAsm = false;
/// True if any WinCFI instruction have been emitted in this function.
- Optional<bool> HasWinCFI;
+ bool HasWinCFI = false;
/// Current high-level properties of the IR of the function (e.g. is in SSA
/// form or whether registers have been allocated)
@@ -316,6 +316,9 @@ class MachineFunction {
/// Map a landing pad's EH symbol to the call site indexes.
DenseMap<MCSymbol*, SmallVector<unsigned, 4>> LPadToCallSiteMap;
+ /// Map a landing pad to its index.
+ DenseMap<const MachineBasicBlock *, unsigned> WasmLPadToIndexMap;
+
/// Map of invoke call site index values to associated begin EH_LABEL.
DenseMap<MCSymbol*, unsigned> CallSiteMap;
@@ -363,10 +366,31 @@ public:
int Slot, const DILocation *Loc)
: Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
};
+
+ class Delegate {
+ virtual void anchor();
+
+ public:
+ virtual ~Delegate() = default;
+ /// Callback after an insertion. This should not modify the MI directly.
+ virtual void MF_HandleInsertion(MachineInstr &MI) = 0;
+ /// Callback before a removal. This should not modify the MI directly.
+ virtual void MF_HandleRemoval(MachineInstr &MI) = 0;
+ };
+
+private:
+ Delegate *TheDelegate = nullptr;
+
+ // Callbacks for insertion and removal.
+ void handleInsertion(MachineInstr &MI);
+ void handleRemoval(MachineInstr &MI);
+ friend struct ilist_traits<MachineInstr>;
+
+public:
using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
VariableDbgInfoMapTy VariableDbgInfos;
- MachineFunction(const Function &F, const TargetMachine &Target,
+ MachineFunction(const Function &F, const LLVMTargetMachine &Target,
const TargetSubtargetInfo &STI, unsigned FunctionNum,
MachineModuleInfo &MMI);
MachineFunction(const MachineFunction &) = delete;
@@ -379,6 +403,23 @@ public:
init();
}
+ /// Reset the currently registered delegate - otherwise assert.
+ void resetDelegate(Delegate *delegate) {
+ assert(TheDelegate == delegate &&
+ "Only the current delegate can perform reset!");
+ TheDelegate = nullptr;
+ }
+
+ /// Set the delegate. resetDelegate must be called before attempting
+ /// to set.
+ void setDelegate(Delegate *delegate) {
+ assert(delegate && !TheDelegate &&
+ "Attempted to set delegate to null, or to change it without "
+ "first resetting it!");
+
+ TheDelegate = delegate;
+ }
+
MachineModuleInfo &getMMI() const { return MMI; }
MCContext &getContext() const { return Ctx; }
@@ -397,7 +438,7 @@ public:
unsigned getFunctionNumber() const { return FunctionNumber; }
/// getTarget - Return the target machine this machine code is compiled with
- const TargetMachine &getTarget() const { return Target; }
+ const LLVMTargetMachine &getTarget() const { return Target; }
/// getSubtarget - Return the subtarget for which this machine code is being
/// compiled.
@@ -484,8 +525,7 @@ public:
}
bool hasWinCFI() const {
- assert(HasWinCFI.hasValue() && "HasWinCFI not set yet!");
- return *HasWinCFI;
+ return HasWinCFI;
}
void setHasWinCFI(bool v) { HasWinCFI = v; }
@@ -619,6 +659,14 @@ public:
BasicBlocks.sort(comp);
}
+ /// Return the number of \p MachineInstrs in this \p MachineFunction.
+ unsigned getInstructionCount() const {
+ unsigned InstrCount = 0;
+ for (const MachineBasicBlock &MBB : BasicBlocks)
+ InstrCount += MBB.size();
+ return InstrCount;
+ }
+
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
@@ -711,23 +759,14 @@ public:
/// Allocate and initialize a register mask with @p NumRegister bits.
uint32_t *allocateRegMask();
- /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
- /// pointers. This array is owned by the MachineFunction.
- MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
-
- /// extractLoadMemRefs - Allocate an array and populate it with just the
- /// load information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
-
- /// extractStoreMemRefs - Allocate an array and populate it with just the
- /// store information from the given MachineMemOperand sequence.
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator>
- extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End);
+ /// Allocate and construct an extra info structure for a `MachineInstr`.
+ ///
+ /// This is allocated on the function's allocator and so lives the life of
+ /// the function.
+ MachineInstr::ExtraInfo *
+ createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr);
/// Allocate a string and populate it with the given external symbol name.
const char *createExternalSymbolName(StringRef Name);
@@ -776,7 +815,8 @@ public:
LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
/// Remap landing pad labels and remove any deleted landing pads.
- void tidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
+ void tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap = nullptr,
+ bool TidyIfNoBeginLabels = true);
/// Return a reference to the landing pad info for the current function.
const std::vector<LandingPadInfo> &getLandingPads() const {
@@ -788,7 +828,9 @@ public:
void addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel);
- /// Add a new panding pad. Returns the label ID for the landing pad entry.
+ /// Add a new panding pad, and extract the exception handling information from
+ /// the landingpad instruction. Returns the label ID for the landing pad
+ /// entry.
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
/// Provide the catch typeinfo for a landing pad.
@@ -817,6 +859,22 @@ public:
/// Map the landing pad's EH symbol to the call site indexes.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
+ /// Map the landing pad to its index. Used for Wasm exception handling.
+ void setWasmLandingPadIndex(const MachineBasicBlock *LPad, unsigned Index) {
+ WasmLPadToIndexMap[LPad] = Index;
+ }
+
+ /// Returns true if the landing pad has an associate index in wasm EH.
+ bool hasWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
+ return WasmLPadToIndexMap.count(LPad);
+ }
+
+ /// Get the index in wasm EH for a given landing pad.
+ unsigned getWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
+ assert(hasWasmLandingPadIndex(LPad));
+ return WasmLPadToIndexMap.lookup(LPad);
+ }
+
/// Get the call site indexes for a landing pad EH symbol.
SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
assert(hasCallSiteLandingPad(Sym) &&
@@ -880,15 +938,6 @@ public:
}
};
-/// \name Exception Handling
-/// \{
-
-/// Extract the exception handling information from the landingpad instruction
-/// and add them to the specified machine module info.
-void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB);
-
-/// \}
-
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index 88e13cdf4138..ea1a2a536fc7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -17,16 +17,20 @@
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -61,7 +65,7 @@ class MachineInstr
: public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
ilist_sentinel_tracking<true>> {
public:
- using mmo_iterator = MachineMemOperand **;
+ using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
/// Flags to specify different kinds of comments to output in
/// assembly code. These flags carry semantic information not
@@ -93,8 +97,14 @@ public:
// contraction operations like fma.
FmAfn = 1 << 9, // Instruction may map to Fast math
// instrinsic approximation.
- FmReassoc = 1 << 10 // Instruction supports Fast math
+ FmReassoc = 1 << 10, // Instruction supports Fast math
// reassociation of operand order.
+ NoUWrap = 1 << 11, // Instruction supports binary operator
+ // no unsigned wrap.
+ NoSWrap = 1 << 12, // Instruction supports binary operator
+ // no signed wrap.
+ IsExact = 1 << 13 // Instruction supports division is
+ // known to be exact.
};
private:
@@ -118,14 +128,102 @@ private:
// anything other than to convey comment
// information to AsmPrinter.
- uint8_t NumMemRefs = 0; // Information on memory references.
- // Note that MemRefs == nullptr, means 'don't know', not 'no memory access'.
- // Calling code must treat missing information conservatively. If the number
- // of memory operands required to be precise exceeds the maximum value of
- // NumMemRefs - currently 256 - we remove the operands entirely. Note also
- // that this is a non-owning reference to a shared copy on write buffer owned
- // by the MachineFunction and created via MF.allocateMemRefsArray.
- mmo_iterator MemRefs = nullptr;
+ /// Internal implementation detail class that provides out-of-line storage for
+ /// extra info used by the machine instruction when this info cannot be stored
+ /// in-line within the instruction itself.
+ ///
+ /// This has to be defined eagerly due to the implementation constraints of
+ /// `PointerSumType` where it is used.
+ class ExtraInfo final
+ : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
+ public:
+ static ExtraInfo *create(BumpPtrAllocator &Allocator,
+ ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol = nullptr,
+ MCSymbol *PostInstrSymbol = nullptr) {
+ bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
+ bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
+ auto *Result = new (Allocator.Allocate(
+ totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
+ MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
+ alignof(ExtraInfo)))
+ ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
+
+ // Copy the actual data into the trailing objects.
+ std::copy(MMOs.begin(), MMOs.end(),
+ Result->getTrailingObjects<MachineMemOperand *>());
+
+ if (HasPreInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
+ if (HasPostInstrSymbol)
+ Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
+ PostInstrSymbol;
+
+ return Result;
+ }
+
+ ArrayRef<MachineMemOperand *> getMMOs() const {
+ return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
+ }
+
+ MCSymbol *getPreInstrSymbol() const {
+ return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
+ }
+
+ MCSymbol *getPostInstrSymbol() const {
+ return HasPostInstrSymbol
+ ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
+ : nullptr;
+ }
+
+ private:
+ friend TrailingObjects;
+
+ // Description of the extra info, used to interpret the actual optional
+ // data appended.
+ //
+ // Note that this is not terribly space optimized. This leaves a great deal
+ // of flexibility to fit more in here later.
+ const int NumMMOs;
+ const bool HasPreInstrSymbol;
+ const bool HasPostInstrSymbol;
+
+ // Implement the `TrailingObjects` internal API.
+ size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
+ return NumMMOs;
+ }
+ size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
+ return HasPreInstrSymbol + HasPostInstrSymbol;
+ }
+
+ // Just a boring constructor to allow us to initialize the sizes. Always use
+ // the `create` routine above.
+ ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
+ : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
+ HasPostInstrSymbol(HasPostInstrSymbol) {}
+ };
+
+ /// Enumeration of the kinds of inline extra info available. It is important
+ /// that the `MachineMemOperand` inline kind has a tag value of zero to make
+ /// it accessible as an `ArrayRef`.
+ enum ExtraInfoInlineKinds {
+ EIIK_MMO = 0,
+ EIIK_PreInstrSymbol,
+ EIIK_PostInstrSymbol,
+ EIIK_OutOfLine
+ };
+
+ // We store extra information about the instruction here. The common case is
+ // expected to be nothing or a single pointer (typically a MMO or a symbol).
+ // We work to optimize this common case by storing it inline here rather than
+ // requiring a separate allocation, but we fall back to an allocation when
+ // multiple pointers are needed.
+ PointerSumType<ExtraInfoInlineKinds,
+ PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
+ PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
+ PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
+ Info;
DebugLoc debugLoc; // Source line information.
@@ -310,7 +408,7 @@ public:
/// Returns the opcode of this MachineInstr.
unsigned getOpcode() const { return MCID->Opcode; }
- /// Access to explicit operands of the instruction.
+ /// Retuns the total number of operands.
unsigned getNumOperands() const { return NumOperands; }
const MachineOperand& getOperand(unsigned i) const {
@@ -412,28 +510,70 @@ public:
return I - operands_begin();
}
- /// Access to memory operands of the instruction
- mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+ /// Access to memory operands of the instruction. If there are none, that does
+ /// not imply anything about whether the function accesses memory. Instead,
+ /// the caller must behave conservatively.
+ ArrayRef<MachineMemOperand *> memoperands() const {
+ if (!Info)
+ return {};
+
+ if (Info.is<EIIK_MMO>())
+ return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
+
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getMMOs();
+
+ return {};
+ }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+ /// anything about whether the function accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_begin() const { return memoperands().begin(); }
+
+ /// Access to memory operands of the instruction.
+ ///
+ /// If `memoperands_begin() == memoperands_end()`, that does not imply
+ /// anything about whether the function accesses memory. Instead, the caller
+ /// must behave conservatively.
+ mmo_iterator memoperands_end() const { return memoperands().end(); }
+
/// Return true if we don't have any memory operands which described the
/// memory access done by this instruction. If this is true, calling code
/// must be conservative.
- bool memoperands_empty() const { return NumMemRefs == 0; }
-
- iterator_range<mmo_iterator> memoperands() {
- return make_range(memoperands_begin(), memoperands_end());
- }
- iterator_range<mmo_iterator> memoperands() const {
- return make_range(memoperands_begin(), memoperands_end());
- }
+ bool memoperands_empty() const { return memoperands().empty(); }
/// Return true if this instruction has exactly one MachineMemOperand.
- bool hasOneMemOperand() const {
- return NumMemRefs == 1;
- }
+ bool hasOneMemOperand() const { return memoperands().size() == 1; }
/// Return the number of memory operands.
- unsigned getNumMemOperands() const { return NumMemRefs; }
+ unsigned getNumMemOperands() const { return memoperands().size(); }
+
+ /// Helper to extract a pre-instruction symbol if one has been added.
+ MCSymbol *getPreInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPreInstrSymbol();
+
+ return nullptr;
+ }
+
+ /// Helper to extract a post-instruction symbol if one has been added.
+ MCSymbol *getPostInstrSymbol() const {
+ if (!Info)
+ return nullptr;
+ if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
+ return S;
+ if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
+ return EI->getPostInstrSymbol();
+
+ return nullptr;
+ }
/// API for querying MachineInstr properties. They are the same as MCInstrDesc
/// queries but they are bundle aware.
@@ -450,6 +590,8 @@ public:
/// The second argument indicates whether the query should look inside
/// instruction bundles.
bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+ assert(MCFlag < 64 &&
+ "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
// Inline the fast path for unbundled or bundle-internal instructions.
if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
return getDesc().getFlags() & (1ULL << MCFlag);
@@ -482,6 +624,12 @@ public:
return hasProperty(MCID::Return, Type);
}
+ /// Return true if this is an instruction that marks the end of an EH scope,
+ /// i.e., a catchpad or a cleanuppad instruction.
+ bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::EHScopeReturn, Type);
+ }
+
bool isCall(QueryType Type = AnyInBundle) const {
return hasProperty(MCID::Call, Type);
}
@@ -1323,47 +1471,63 @@ public:
/// fewer operand than it started with.
void RemoveOperand(unsigned OpNo);
+ /// Clear this MachineInstr's memory reference descriptor list. This resets
+ /// the memrefs to their most conservative state. This should be used only
+ /// as a last resort since it greatly pessimizes our knowledge of the memory
+ /// access performed by the instruction.
+ void dropMemRefs(MachineFunction &MF);
+
+ /// Assign this MachineInstr's memory reference descriptor list.
+ ///
+ /// Unlike other methods, this *will* allocate them into a new array
+ /// associated with the provided `MachineFunction`.
+ void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
+
/// Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
- /// Assign this MachineInstr's memory reference descriptor list.
- /// This does not transfer ownership.
- void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
- }
+ /// Clone another MachineInstr's memory reference descriptor list and replace
+ /// ours with it.
+ ///
+ /// Note that `*this` may be the incoming MI!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
+ void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
- /// Assign this MachineInstr's memory reference descriptor list. First
- /// element in the pair is the begin iterator/pointer to the array; the
- /// second is the number of MemoryOperands. This does not transfer ownership
- /// of the underlying memory.
- void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
- MemRefs = NewMemRefs.first;
- NumMemRefs = uint8_t(NewMemRefs.second);
- assert(NumMemRefs == NewMemRefs.second &&
- "Too many memrefs - must drop memory operands");
- }
+ /// Clone the merge of multiple MachineInstrs' memory reference descriptors
+ /// list and replace ours with it.
+ ///
+ /// Note that `*this` may be one of the incoming MIs!
+ ///
+ /// Prefer this API whenever possible as it can avoid allocations in common
+ /// cases.
+ void cloneMergedMemRefs(MachineFunction &MF,
+ ArrayRef<const MachineInstr *> MIs);
- /// Return a set of memrefs (begin iterator, size) which conservatively
- /// describe the memory behavior of both MachineInstrs. This is appropriate
- /// for use when merging two MachineInstrs into one. This routine does not
- /// modify the memrefs of the this MachineInstr.
- std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+ /// Set a symbol that will be emitted just prior to the instruction itself.
+ ///
+ /// Setting this to a null pointer will remove any such symbol.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
+
+ /// Set a symbol that will be emitted just after the instruction itself.
+ ///
+ /// Setting this to a null pointer will remove any such symbol.
+ ///
+ /// FIXME: This is not fully implemented yet.
+ void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);
/// Return the MIFlags which represent both MachineInstrs. This
/// should be used when merging two MachineInstrs into one. This routine does
/// not modify the MIFlags of this MachineInstr.
uint16_t mergeFlagsWith(const MachineInstr& Other) const;
- /// Clear this MachineInstr's memory reference descriptor list. This resets
- /// the memrefs to their most conservative state. This should be used only
- /// as a last resort since it greatly pessimizes our knowledge of the memory
- /// access performed by the instruction.
- void dropMemRefs() {
- MemRefs = nullptr;
- NumMemRefs = 0;
- }
+  /// Copy all flags to MachineInstr MIFlags
+ void copyIRFlags(const Instruction &I);
/// Break any tie involving OpIdx.
void untieRegOperand(unsigned OpIdx) {
@@ -1377,6 +1541,13 @@ public:
/// Add all implicit def and use operands to this instruction.
void addImplicitDefUseOperands(MachineFunction &MF);
+ /// Scan instructions following MI and collect any matching DBG_VALUEs.
+ void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);
+
+ /// Find all DBG_VALUEs immediately following this instruction that point
+ /// to a register def in this instruction and point them to \p Reg instead.
+ void changeDebugValuesDefReg(unsigned Reg);
+
private:
/// If this instruction is embedded into a MachineFunction, return the
/// MachineRegisterInfo object for the current function, otherwise
@@ -1394,7 +1565,7 @@ private:
void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// Slow path for hasProperty when we're dealing with a bundle.
- bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
+ bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;
/// Implements the logic of getRegClassConstraintEffectForVReg for the
/// this MI and the given operand index \p OpIdx.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 665608755741..b5e523f655e7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -191,15 +191,20 @@ public:
return *this;
}
- const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
- MachineInstr::mmo_iterator e) const {
- MI->setMemRefs(b, e);
+ const MachineInstrBuilder &
+ setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
+ MI->setMemRefs(*MF, MMOs);
return *this;
}
- const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
- unsigned> MemOperandsRef) const {
- MI->setMemRefs(MemOperandsRef);
+ const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
+ MI->cloneMemRefs(*MF, OtherMI);
+ return *this;
+ }
+
+ const MachineInstrBuilder &
+ cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
+ MI->cloneMergedMemRefs(*MF, OtherMIs);
return *this;
}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 554e89019b76..4371420bc7a2 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -46,10 +46,10 @@ namespace llvm {
class BasicBlock;
class CallInst;
class Function;
-class MachineFunction;
+class LLVMTargetMachine;
class MMIAddrLabelMap;
+class MachineFunction;
class Module;
-class TargetMachine;
//===----------------------------------------------------------------------===//
/// This class can be derived from and used by targets to hold private
@@ -76,7 +76,7 @@ protected:
/// for specific use.
///
class MachineModuleInfo : public ImmutablePass {
- const TargetMachine &TM;
+ const LLVMTargetMachine &TM;
/// This is the MCContext used for the entire code generator.
MCContext Context;
@@ -145,7 +145,7 @@ class MachineModuleInfo : public ImmutablePass {
public:
static char ID; // Pass identification, replacement for typeid
- explicit MachineModuleInfo(const TargetMachine *TM = nullptr);
+ explicit MachineModuleInfo(const LLVMTargetMachine *TM = nullptr);
~MachineModuleInfo() override;
// Initialization and Finalization
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
index 6a87fa2fbf00..17df1fa792b7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -80,6 +80,28 @@ public:
SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
};
+/// MachineModuleInfoCOFF - This is a MachineModuleInfoImpl implementation
+/// for COFF targets.
+class MachineModuleInfoCOFF : public MachineModuleInfoImpl {
+ /// GVStubs - These stubs are used to materialize global addresses in PIC
+ /// mode.
+ DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+ virtual void anchor(); // Out of line virtual method.
+
+public:
+ MachineModuleInfoCOFF(const MachineModuleInfo &) {}
+
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return GVStubs[Sym];
+ }
+
+ /// Accessor methods to return the set of stubs in sorted order.
+
+ SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+};
+
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOutliner.h b/contrib/llvm/include/llvm/CodeGen/MachineOutliner.h
index 95bfc24b57ff..bfd1e994053a 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOutliner.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOutliner.h
@@ -61,9 +61,6 @@ public:
/// \p OutlinedFunctions.
unsigned FunctionIdx;
- /// Set to false if the candidate overlapped with another candidate.
- bool InCandidateList = true;
-
/// Identifier denoting the instructions to emit to call an outlined function
/// from this point. Defined by the target.
unsigned CallConstructionID;
@@ -82,6 +79,12 @@ public:
/// been used across the sequence.
LiveRegUnits UsedInSequence;
+ /// Target-specific flags for this Candidate's MBB.
+ unsigned Flags = 0x0;
+
+ /// True if initLRU has been called on this Candidate.
+ bool LRUWasSet = false;
+
/// Return the number of instructions in this Candidate.
unsigned getLength() const { return Len; }
@@ -99,9 +102,7 @@ public:
}
/// Returns the call overhead of this candidate if it is in the list.
- unsigned getCallOverhead() const {
- return InCandidateList ? CallOverhead : 0;
- }
+ unsigned getCallOverhead() const { return CallOverhead; }
MachineBasicBlock::iterator &front() { return FirstInst; }
MachineBasicBlock::iterator &back() { return LastInst; }
@@ -120,9 +121,9 @@ public:
Candidate(unsigned StartIdx, unsigned Len,
MachineBasicBlock::iterator &FirstInst,
MachineBasicBlock::iterator &LastInst, MachineBasicBlock *MBB,
- unsigned FunctionIdx)
+ unsigned FunctionIdx, unsigned Flags)
: StartIdx(StartIdx), Len(Len), FirstInst(FirstInst), LastInst(LastInst),
- MBB(MBB), FunctionIdx(FunctionIdx) {}
+ MBB(MBB), FunctionIdx(FunctionIdx), Flags(Flags) {}
Candidate() {}
/// Used to ensure that \p Candidates are outlined in an order that
@@ -138,6 +139,10 @@ public:
void initLRU(const TargetRegisterInfo &TRI) {
assert(MBB->getParent()->getRegInfo().tracksLiveness() &&
"Candidate's Machine Function must track liveness");
+ // Only initialize once.
+ if (LRUWasSet)
+ return;
+ LRUWasSet = true;
LRU.init(TRI);
LRU.addLiveOuts(*MBB);
@@ -158,24 +163,13 @@ public:
/// class of candidate.
struct OutlinedFunction {
-private:
- /// The number of candidates for this \p OutlinedFunction.
- unsigned OccurrenceCount = 0;
-
public:
- std::vector<std::shared_ptr<Candidate>> Candidates;
+ std::vector<Candidate> Candidates;
/// The actual outlined function created.
/// This is initialized after we go through and create the actual function.
MachineFunction *MF = nullptr;
- /// A number assigned to this function which appears at the end of its name.
- unsigned Name;
-
- /// The sequence of integers corresponding to the instructions in this
- /// function.
- std::vector<unsigned> Sequence;
-
/// Represents the size of a sequence in bytes. (Some instructions vary
/// widely in size, so just counting the instructions isn't very useful.)
unsigned SequenceSize;
@@ -187,49 +181,41 @@ public:
unsigned FrameConstructionID;
/// Return the number of candidates for this \p OutlinedFunction.
- unsigned getOccurrenceCount() { return OccurrenceCount; }
-
- /// Decrement the occurrence count of this OutlinedFunction and return the
- /// new count.
- unsigned decrement() {
- assert(OccurrenceCount > 0 && "Can't decrement an empty function!");
- OccurrenceCount--;
- return getOccurrenceCount();
- }
+ unsigned getOccurrenceCount() const { return Candidates.size(); }
/// Return the number of bytes it would take to outline this
/// function.
- unsigned getOutliningCost() {
+ unsigned getOutliningCost() const {
unsigned CallOverhead = 0;
- for (std::shared_ptr<Candidate> &C : Candidates)
- CallOverhead += C->getCallOverhead();
+ for (const Candidate &C : Candidates)
+ CallOverhead += C.getCallOverhead();
return CallOverhead + SequenceSize + FrameOverhead;
}
/// Return the size in bytes of the unoutlined sequences.
- unsigned getNotOutlinedCost() { return OccurrenceCount * SequenceSize; }
+ unsigned getNotOutlinedCost() const {
+ return getOccurrenceCount() * SequenceSize;
+ }
/// Return the number of instructions that would be saved by outlining
/// this function.
- unsigned getBenefit() {
+ unsigned getBenefit() const {
unsigned NotOutlinedCost = getNotOutlinedCost();
unsigned OutlinedCost = getOutliningCost();
return (NotOutlinedCost < OutlinedCost) ? 0
: NotOutlinedCost - OutlinedCost;
}
- OutlinedFunction(std::vector<Candidate> &Cands,
- unsigned SequenceSize, unsigned FrameOverhead,
- unsigned FrameConstructionID)
- : SequenceSize(SequenceSize), FrameOverhead(FrameOverhead),
- FrameConstructionID(FrameConstructionID) {
- OccurrenceCount = Cands.size();
- for (Candidate &C : Cands)
- Candidates.push_back(std::make_shared<outliner::Candidate>(C));
-
- unsigned B = getBenefit();
- for (std::shared_ptr<Candidate> &C : Candidates)
- C->Benefit = B;
+ /// Return the number of instructions in this sequence.
+ unsigned getNumInstrs() const { return Candidates[0].getLength(); }
+
+ OutlinedFunction(std::vector<Candidate> &Candidates, unsigned SequenceSize,
+ unsigned FrameOverhead, unsigned FrameConstructionID)
+ : Candidates(Candidates), SequenceSize(SequenceSize),
+ FrameOverhead(FrameOverhead), FrameConstructionID(FrameConstructionID) {
+ const unsigned B = getBenefit();
+ for (Candidate &C : Candidates)
+ C.Benefit = B;
}
OutlinedFunction() {}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
index 3aba0bba7d1a..a031c92d914f 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
@@ -24,22 +24,20 @@
namespace llvm {
-using MachinePassCtor = void *(*)();
-
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryListener - Listener to adds and removals of nodes in
/// registration list.
///
//===----------------------------------------------------------------------===//
-class MachinePassRegistryListener {
- virtual void anchor();
+template <class PassCtorTy> class MachinePassRegistryListener {
+ virtual void anchor() {}
public:
MachinePassRegistryListener() = default;
virtual ~MachinePassRegistryListener() = default;
- virtual void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) = 0;
+ virtual void NotifyAdd(StringRef N, PassCtorTy C, StringRef D) = 0;
virtual void NotifyRemove(StringRef N) = 0;
};
@@ -48,15 +46,15 @@ public:
/// MachinePassRegistryNode - Machine pass node stored in registration list.
///
//===----------------------------------------------------------------------===//
-class MachinePassRegistryNode {
+template <typename PassCtorTy> class MachinePassRegistryNode {
private:
MachinePassRegistryNode *Next = nullptr; // Next function pass in list.
StringRef Name; // Name of function pass.
StringRef Description; // Description string.
- MachinePassCtor Ctor; // Function pass creator.
+ PassCtorTy Ctor; // Pass creator.
public:
- MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
+ MachinePassRegistryNode(const char *N, const char *D, PassCtorTy C)
: Name(N), Description(D), Ctor(C) {}
// Accessors
@@ -64,7 +62,7 @@ public:
MachinePassRegistryNode **getNextAddress() { return &Next; }
StringRef getName() const { return Name; }
StringRef getDescription() const { return Description; }
- MachinePassCtor getCtor() const { return Ctor; }
+ PassCtorTy getCtor() const { return Ctor; }
void setNext(MachinePassRegistryNode *N) { Next = N; }
};
@@ -73,11 +71,12 @@ public:
/// MachinePassRegistry - Track the registration of machine passes.
///
//===----------------------------------------------------------------------===//
-class MachinePassRegistry {
+template <typename PassCtorTy> class MachinePassRegistry {
private:
- MachinePassRegistryNode *List; // List of registry nodes.
- MachinePassCtor Default; // Default function pass creator.
- MachinePassRegistryListener *Listener; // Listener for list adds are removes.
+ MachinePassRegistryNode<PassCtorTy> *List; // List of registry nodes.
+ PassCtorTy Default; // Default function pass creator.
+ MachinePassRegistryListener<PassCtorTy>
+      *Listener; // Listener for list adds and removes.
public:
// NO CONSTRUCTOR - we don't want static constructor ordering to mess
@@ -85,19 +84,47 @@ public:
// Accessors.
//
- MachinePassRegistryNode *getList() { return List; }
- MachinePassCtor getDefault() { return Default; }
- void setDefault(MachinePassCtor C) { Default = C; }
- void setDefault(StringRef Name);
- void setListener(MachinePassRegistryListener *L) { Listener = L; }
+ MachinePassRegistryNode<PassCtorTy> *getList() { return List; }
+ PassCtorTy getDefault() { return Default; }
+ void setDefault(PassCtorTy C) { Default = C; }
+ /// setDefault - Set the default constructor by name.
+ void setDefault(StringRef Name) {
+ PassCtorTy Ctor = nullptr;
+ for (MachinePassRegistryNode<PassCtorTy> *R = getList(); R;
+ R = R->getNext()) {
+ if (R->getName() == Name) {
+ Ctor = R->getCtor();
+ break;
+ }
+ }
+ assert(Ctor && "Unregistered pass name");
+ setDefault(Ctor);
+ }
+ void setListener(MachinePassRegistryListener<PassCtorTy> *L) { Listener = L; }
/// Add - Adds a function pass to the registration list.
///
- void Add(MachinePassRegistryNode *Node);
+ void Add(MachinePassRegistryNode<PassCtorTy> *Node) {
+ Node->setNext(List);
+ List = Node;
+ if (Listener)
+ Listener->NotifyAdd(Node->getName(), Node->getCtor(),
+ Node->getDescription());
+ }
/// Remove - Removes a function pass from the registration list.
///
- void Remove(MachinePassRegistryNode *Node);
+ void Remove(MachinePassRegistryNode<PassCtorTy> *Node) {
+ for (MachinePassRegistryNode<PassCtorTy> **I = &List; *I;
+ I = (*I)->getNextAddress()) {
+ if (*I == Node) {
+ if (Listener)
+ Listener->NotifyRemove(Node->getName());
+ *I = (*I)->getNext();
+ break;
+ }
+ }
+ }
};
//===----------------------------------------------------------------------===//
@@ -105,9 +132,11 @@ public:
/// RegisterPassParser class - Handle the addition of new machine passes.
///
//===----------------------------------------------------------------------===//
-template<class RegistryClass>
-class RegisterPassParser : public MachinePassRegistryListener,
- public cl::parser<typename RegistryClass::FunctionPassCtor> {
+template <class RegistryClass>
+class RegisterPassParser
+ : public MachinePassRegistryListener<
+ typename RegistryClass::FunctionPassCtor>,
+ public cl::parser<typename RegistryClass::FunctionPassCtor> {
public:
RegisterPassParser(cl::Option &O)
: cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
@@ -129,8 +158,9 @@ public:
}
// Implement the MachinePassRegistryListener callbacks.
- void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) override {
- this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
+ void NotifyAdd(StringRef N, typename RegistryClass::FunctionPassCtor C,
+ StringRef D) override {
+ this->addLiteralOption(N, C, D);
}
void NotifyRemove(StringRef N) override {
this->removeLiteralOption(N);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePipeliner.h b/contrib/llvm/include/llvm/CodeGen/MachinePipeliner.h
new file mode 100644
index 000000000000..38cb33e90e63
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePipeliner.h
@@ -0,0 +1,608 @@
+//===- MachinePipeliner.h - Machine Software Pipeliner Pass -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
+//
+// Software pipelining (SWP) is an instruction scheduling technique for loops
+// that overlap loop iterations and exploits ILP via a compiler transformation.
+//
+// Swing Modulo Scheduling is an implementation of software pipelining
+// that generates schedules that are near optimal in terms of initiation
+// interval, register requirements, and stage count. See the papers:
+//
+// "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
+// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
+// Conference on Parallel Architectures and Compilation Techniques.
+//
+// "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
+// Llosa, E. Ayguade, A. Gonzalez, M. Valero, and J. Eckhardt. In IEEE
+// Transactions on Computers, Vol. 50, No. 3, 2001.
+//
+// "An Implementation of Swing Modulo Scheduling With Extensions for
+// Superblocks", by T. Lattner, Master's Thesis, University of Illinois at
+// Urbana-Champaign, 2005.
+//
+//
+// The SMS algorithm consists of three main steps after computing the minimal
+// initiation interval (MII).
+// 1) Analyze the dependence graph and compute information about each
+// instruction in the graph.
+// 2) Order the nodes (instructions) by priority based upon the heuristics
+// described in the algorithm.
+// 3) Attempt to schedule the nodes in the specified order using the MII.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
+#define LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+namespace llvm {
+
+class NodeSet;
+class SMSchedule;
+
+extern cl::opt<bool> SwpEnableCopyToPhi;
+
+/// The main class in the implementation of the target independent
+/// software pipeliner pass.
+class MachinePipeliner : public MachineFunctionPass {
+public:
+ MachineFunction *MF = nullptr;
+ const MachineLoopInfo *MLI = nullptr;
+ const MachineDominatorTree *MDT = nullptr;
+ const InstrItineraryData *InstrItins;
+ const TargetInstrInfo *TII = nullptr;
+ RegisterClassInfo RegClassInfo;
+
+#ifndef NDEBUG
+ static int NumTries;
+#endif
+
+ /// Cache the target analysis information about the loop.
+ struct LoopInfo {
+ MachineBasicBlock *TBB = nullptr;
+ MachineBasicBlock *FBB = nullptr;
+ SmallVector<MachineOperand, 4> BrCond;
+ MachineInstr *LoopInductionVar = nullptr;
+ MachineInstr *LoopCompare = nullptr;
+ };
+ LoopInfo LI;
+
+ static char ID;
+
+ MachinePipeliner() : MachineFunctionPass(ID) {
+ initializeMachinePipelinerPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addPreserved<AAResultsWrapperPass>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+private:
+ void preprocessPhiNodes(MachineBasicBlock &B);
+ bool canPipelineLoop(MachineLoop &L);
+ bool scheduleLoop(MachineLoop &L);
+ bool swingModuloScheduler(MachineLoop &L);
+};
+
+/// This class builds the dependence graph for the instructions in a loop,
+/// and attempts to schedule the instructions using the SMS algorithm.
+class SwingSchedulerDAG : public ScheduleDAGInstrs {
+ MachinePipeliner &Pass;
+ /// The minimum initiation interval between iterations for this schedule.
+ unsigned MII = 0;
+ /// Set to true if a valid pipelined schedule is found for the loop.
+ bool Scheduled = false;
+ MachineLoop &Loop;
+ LiveIntervals &LIS;
+ const RegisterClassInfo &RegClassInfo;
+
+  /// A topological ordering of the SUnits, which is needed for changing
+ /// dependences and iterating over the SUnits.
+ ScheduleDAGTopologicalSort Topo;
+
+ struct NodeInfo {
+ int ASAP = 0;
+ int ALAP = 0;
+ int ZeroLatencyDepth = 0;
+ int ZeroLatencyHeight = 0;
+
+ NodeInfo() = default;
+ };
+ /// Computed properties for each node in the graph.
+ std::vector<NodeInfo> ScheduleInfo;
+
+ enum OrderKind { BottomUp = 0, TopDown = 1 };
+ /// Computed node ordering for scheduling.
+ SetVector<SUnit *> NodeOrder;
+
+ using NodeSetType = SmallVector<NodeSet, 8>;
+ using ValueMapTy = DenseMap<unsigned, unsigned>;
+ using MBBVectorTy = SmallVectorImpl<MachineBasicBlock *>;
+ using InstrMapTy = DenseMap<MachineInstr *, MachineInstr *>;
+
+ /// Instructions to change when emitting the final schedule.
+ DenseMap<SUnit *, std::pair<unsigned, int64_t>> InstrChanges;
+
+ /// We may create a new instruction, so remember it because it
+ /// must be deleted when the pass is finished.
+ SmallPtrSet<MachineInstr *, 4> NewMIs;
+
+ /// Ordered list of DAG postprocessing steps.
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
+
+ /// Helper class to implement Johnson's circuit finding algorithm.
+ class Circuits {
+ std::vector<SUnit> &SUnits;
+ SetVector<SUnit *> Stack;
+ BitVector Blocked;
+ SmallVector<SmallPtrSet<SUnit *, 4>, 10> B;
+ SmallVector<SmallVector<int, 4>, 16> AdjK;
+ // Node to Index from ScheduleDAGTopologicalSort
+ std::vector<int> *Node2Idx;
+ unsigned NumPaths;
+ static unsigned MaxPaths;
+
+ public:
+ Circuits(std::vector<SUnit> &SUs, ScheduleDAGTopologicalSort &Topo)
+ : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {
+ Node2Idx = new std::vector<int>(SUs.size());
+ unsigned Idx = 0;
+ for (const auto &NodeNum : Topo)
+ Node2Idx->at(NodeNum) = Idx++;
+ }
+
+ ~Circuits() { delete Node2Idx; }
+
+ /// Reset the data structures used in the circuit algorithm.
+ void reset() {
+ Stack.clear();
+ Blocked.reset();
+ B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());
+ NumPaths = 0;
+ }
+
+ void createAdjacencyStructure(SwingSchedulerDAG *DAG);
+ bool circuit(int V, int S, NodeSetType &NodeSets, bool HasBackedge = false);
+ void unblock(int U);
+ };
+
+ struct CopyToPhiMutation : public ScheduleDAGMutation {
+ void apply(ScheduleDAGInstrs *DAG) override;
+ };
+
+public:
+ SwingSchedulerDAG(MachinePipeliner &P, MachineLoop &L, LiveIntervals &lis,
+ const RegisterClassInfo &rci)
+ : ScheduleDAGInstrs(*P.MF, P.MLI, false), Pass(P), Loop(L), LIS(lis),
+ RegClassInfo(rci), Topo(SUnits, &ExitSU) {
+ P.MF->getSubtarget().getSMSMutations(Mutations);
+ if (SwpEnableCopyToPhi)
+ Mutations.push_back(llvm::make_unique<CopyToPhiMutation>());
+ }
+
+ void schedule() override;
+ void finishBlock() override;
+
+ /// Return true if the loop kernel has been scheduled.
+ bool hasNewSchedule() { return Scheduled; }
+
+ /// Return the earliest time an instruction may be scheduled.
+ int getASAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ASAP; }
+
+  /// Return the latest time an instruction may be scheduled.
+ int getALAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ALAP; }
+
+  /// The mobility function, which is the number of slots in which
+  /// an instruction may be scheduled.
+ int getMOV(SUnit *Node) { return getALAP(Node) - getASAP(Node); }
+
+ /// The depth, in the dependence graph, for a node.
+ unsigned getDepth(SUnit *Node) { return Node->getDepth(); }
+
+ /// The maximum unweighted length of a path from an arbitrary node to the
+ /// given node in which each edge has latency 0
+ int getZeroLatencyDepth(SUnit *Node) {
+ return ScheduleInfo[Node->NodeNum].ZeroLatencyDepth;
+ }
+
+ /// The height, in the dependence graph, for a node.
+ unsigned getHeight(SUnit *Node) { return Node->getHeight(); }
+
+ /// The maximum unweighted length of a path from the given node to an
+ /// arbitrary node in which each edge has latency 0
+ int getZeroLatencyHeight(SUnit *Node) {
+ return ScheduleInfo[Node->NodeNum].ZeroLatencyHeight;
+ }
+
+ /// Return true if the dependence is a back-edge in the data dependence graph.
+ /// Since the DAG doesn't contain cycles, we represent a cycle in the graph
+ /// using an anti dependence from a Phi to an instruction.
+ bool isBackedge(SUnit *Source, const SDep &Dep) {
+ if (Dep.getKind() != SDep::Anti)
+ return false;
+ return Source->getInstr()->isPHI() || Dep.getSUnit()->getInstr()->isPHI();
+ }
+
+ bool isLoopCarriedDep(SUnit *Source, const SDep &Dep, bool isSucc = true);
+
+ /// The distance function, which indicates that operation V of iteration I
+ /// depends on operations U of iteration I-distance.
+ unsigned getDistance(SUnit *U, SUnit *V, const SDep &Dep) {
+ // Instructions that feed a Phi have a distance of 1. Computing larger
+ // values for arrays requires data dependence information.
+ if (V->getInstr()->isPHI() && Dep.getKind() == SDep::Anti)
+ return 1;
+ return 0;
+ }
+
+ /// Set the Minimum Initiation Interval for this schedule attempt.
+ void setMII(unsigned mii) { MII = mii; }
+
+ void applyInstrChange(MachineInstr *MI, SMSchedule &Schedule);
+
+ void fixupRegisterOverlaps(std::deque<SUnit *> &Instrs);
+
+ /// Return the new base register that was stored away for the changed
+ /// instruction.
+ unsigned getInstrBaseReg(SUnit *SU) {
+ DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
+ InstrChanges.find(SU);
+ if (It != InstrChanges.end())
+ return It->second.first;
+ return 0;
+ }
+
+ void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+ Mutations.push_back(std::move(Mutation));
+ }
+
+ static bool classof(const ScheduleDAGInstrs *DAG) { return true; }
+
+private:
+ void addLoopCarriedDependences(AliasAnalysis *AA);
+ void updatePhiDependences();
+ void changeDependences();
+ unsigned calculateResMII();
+ unsigned calculateRecMII(NodeSetType &RecNodeSets);
+ void findCircuits(NodeSetType &NodeSets);
+ void fuseRecs(NodeSetType &NodeSets);
+ void removeDuplicateNodes(NodeSetType &NodeSets);
+ void computeNodeFunctions(NodeSetType &NodeSets);
+ void registerPressureFilter(NodeSetType &NodeSets);
+ void colocateNodeSets(NodeSetType &NodeSets);
+ void checkNodeSets(NodeSetType &NodeSets);
+ void groupRemainingNodes(NodeSetType &NodeSets);
+ void addConnectedNodes(SUnit *SU, NodeSet &NewSet,
+ SetVector<SUnit *> &NodesAdded);
+ void computeNodeOrder(NodeSetType &NodeSets);
+ void checkValidNodeOrder(const NodeSetType &Circuits) const;
+ bool schedulePipeline(SMSchedule &Schedule);
+ void generatePipelinedLoop(SMSchedule &Schedule);
+ void generateProlog(SMSchedule &Schedule, unsigned LastStage,
+ MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
+ MBBVectorTy &PrologBBs);
+ void generateEpilog(SMSchedule &Schedule, unsigned LastStage,
+ MachineBasicBlock *KernelBB, ValueMapTy *VRMap,
+ MBBVectorTy &EpilogBBs, MBBVectorTy &PrologBBs);
+ void generateExistingPhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
+ MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
+ SMSchedule &Schedule, ValueMapTy *VRMap,
+ InstrMapTy &InstrMap, unsigned LastStageNum,
+ unsigned CurStageNum, bool IsLast);
+ void generatePhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
+ MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
+ SMSchedule &Schedule, ValueMapTy *VRMap,
+ InstrMapTy &InstrMap, unsigned LastStageNum,
+ unsigned CurStageNum, bool IsLast);
+ void removeDeadInstructions(MachineBasicBlock *KernelBB,
+ MBBVectorTy &EpilogBBs);
+ void splitLifetimes(MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs,
+ SMSchedule &Schedule);
+ void addBranches(MBBVectorTy &PrologBBs, MachineBasicBlock *KernelBB,
+ MBBVectorTy &EpilogBBs, SMSchedule &Schedule,
+ ValueMapTy *VRMap);
+ bool computeDelta(MachineInstr &MI, unsigned &Delta);
+ void updateMemOperands(MachineInstr &NewMI, MachineInstr &OldMI,
+ unsigned Num);
+ MachineInstr *cloneInstr(MachineInstr *OldMI, unsigned CurStageNum,
+ unsigned InstStageNum);
+ MachineInstr *cloneAndChangeInstr(MachineInstr *OldMI, unsigned CurStageNum,
+ unsigned InstStageNum,
+ SMSchedule &Schedule);
+ void updateInstruction(MachineInstr *NewMI, bool LastDef,
+ unsigned CurStageNum, unsigned InstrStageNum,
+ SMSchedule &Schedule, ValueMapTy *VRMap);
+ MachineInstr *findDefInLoop(unsigned Reg);
+ unsigned getPrevMapVal(unsigned StageNum, unsigned PhiStage, unsigned LoopVal,
+ unsigned LoopStage, ValueMapTy *VRMap,
+ MachineBasicBlock *BB);
+ void rewritePhiValues(MachineBasicBlock *NewBB, unsigned StageNum,
+ SMSchedule &Schedule, ValueMapTy *VRMap,
+ InstrMapTy &InstrMap);
+ void rewriteScheduledInstr(MachineBasicBlock *BB, SMSchedule &Schedule,
+ InstrMapTy &InstrMap, unsigned CurStageNum,
+ unsigned PhiNum, MachineInstr *Phi,
+ unsigned OldReg, unsigned NewReg,
+ unsigned PrevReg = 0);
+ bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
+ unsigned &OffsetPos, unsigned &NewBase,
+ int64_t &NewOffset);
+ void postprocessDAG();
+};
+
+/// A NodeSet contains a set of SUnit DAG nodes with additional information
+/// that assigns a priority to the set.
+class NodeSet {
+  SetVector<SUnit *> Nodes;        // The DAG nodes belonging to this set.
+  bool HasRecurrence = false;      // True if this set was built from a recurrence (circuit).
+  unsigned RecMII = 0;             // Minimum initiation interval implied by the recurrence.
+  int MaxMOV = 0;                  // Maximum mobility (MOV) over all nodes in the set.
+  unsigned MaxDepth = 0;           // Maximum depth over all nodes in the set.
+  unsigned Colocate = 0;           // Nonzero colocation tag; first tie breaker in operator>.
+  SUnit *ExceedPressure = nullptr; // Node flagged as exceeding register pressure, if any.
+  unsigned Latency = 0;            // Sum of latencies of edges internal to the set.
+
+public:
+  using iterator = SetVector<SUnit *>::const_iterator;
+
+  NodeSet() = default;
+  NodeSet(iterator S, iterator E) : Nodes(S, E), HasRecurrence(true) {
+    Latency = 0; // Accumulate the latency of every edge that stays inside the set.
+    for (unsigned i = 0, e = Nodes.size(); i < e; ++i)
+      for (const SDep &Succ : Nodes[i]->Succs)
+        if (Nodes.count(Succ.getSUnit()))
+          Latency += Succ.getLatency();
+  }
+
+  bool insert(SUnit *SU) { return Nodes.insert(SU); }
+
+  void insert(iterator S, iterator E) { Nodes.insert(S, E); }
+
+  template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) {
+    return Nodes.remove_if(P);
+  }
+
+  unsigned count(SUnit *SU) const { return Nodes.count(SU); }
+
+  bool hasRecurrence() { return HasRecurrence; };
+
+  unsigned size() const { return Nodes.size(); }
+
+  bool empty() const { return Nodes.empty(); }
+
+  SUnit *getNode(unsigned i) const { return Nodes[i]; };
+
+  void setRecMII(unsigned mii) { RecMII = mii; };
+
+  void setColocate(unsigned c) { Colocate = c; };
+
+  void setExceedPressure(SUnit *SU) { ExceedPressure = SU; }
+
+  bool isExceedSU(SUnit *SU) { return ExceedPressure == SU; }
+
+  int compareRecMII(NodeSet &RHS) { return RecMII - RHS.RecMII; } // strcmp-style result.
+
+  int getRecMII() { return RecMII; }
+
+  /// Record the maximum mobility (MOV) and depth over all nodes in the set.
+  void computeNodeSetInfo(SwingSchedulerDAG *SSD) {
+    for (SUnit *SU : *this) {
+      MaxMOV = std::max(MaxMOV, SSD->getMOV(SU));
+      MaxDepth = std::max(MaxDepth, SSD->getDepth(SU));
+    }
+  }
+
+  unsigned getLatency() { return Latency; }
+
+  unsigned getMaxDepth() { return MaxDepth; }
+
+  void clear() { // Reset all members except Latency to their default state.
+    Nodes.clear();
+    RecMII = 0;
+    HasRecurrence = false;
+    MaxMOV = 0;
+    MaxDepth = 0;
+    Colocate = 0;
+    ExceedPressure = nullptr;
+  }
+
+  operator SetVector<SUnit *> &() { return Nodes; } // Implicit view of the underlying SetVector.
+
+  /// Sort the node sets by importance. First, rank them by recurrence MII,
+  /// then by mobility (least mobile scheduled first), and finally by depth.
+  /// Each node set may carry a colocate value, which is used as the first
+  /// tie breaker when both sides have one set.
+  bool operator>(const NodeSet &RHS) const {
+    if (RecMII == RHS.RecMII) {
+      if (Colocate != 0 && RHS.Colocate != 0 && Colocate != RHS.Colocate)
+        return Colocate < RHS.Colocate;
+      if (MaxMOV == RHS.MaxMOV)
+        return MaxDepth > RHS.MaxDepth;
+      return MaxMOV < RHS.MaxMOV;
+    }
+    return RecMII > RHS.RecMII;
+  }
+
+  bool operator==(const NodeSet &RHS) const { // Equality of ranking keys only, not of the node contents.
+    return RecMII == RHS.RecMII && MaxMOV == RHS.MaxMOV &&
+           MaxDepth == RHS.MaxDepth;
+  }
+
+  bool operator!=(const NodeSet &RHS) const { return !operator==(RHS); }
+
+  iterator begin() { return Nodes.begin(); }
+  iterator end() { return Nodes.end(); }
+  void print(raw_ostream &os) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  LLVM_DUMP_METHOD void dump() const;
+#endif
+};
+
+/// This class represents the scheduled code. The main data structure is a
+/// map from scheduled cycle to instructions. During scheduling, the
+/// data structure explicitly represents all stages/iterations. When
+/// the algorithm finishes, the schedule is collapsed into a single stage,
+/// which represents instructions from different loop iterations.
+///
+/// The SMS algorithm allows negative values for cycles, so the first cycle
+/// in the schedule is the smallest cycle value.
+class SMSchedule {
+private:
+  /// Map from execution cycle to instructions.
+  DenseMap<int, std::deque<SUnit *>> ScheduledInstrs;
+
+  /// Map from instruction to execution cycle.
+  std::map<SUnit *, int> InstrToCycle;
+
+  /// Map for each register and the max difference between its uses and def.
+  /// The first element in the pair is the max difference in stages. The
+  /// second is true if the register defines a Phi value and the loop value
+  /// is scheduled before the Phi.
+  std::map<unsigned, std::pair<unsigned, bool>> RegToStageDiff;
+
+  /// Keep track of the first cycle value in the schedule. It starts
+  /// as zero, but the algorithm allows negative values.
+  int FirstCycle = 0;
+
+  /// Keep track of the last cycle value in the schedule.
+  int LastCycle = 0;
+
+  /// The initiation interval (II) for the schedule.
+  int InitiationInterval = 0;
+
+  /// Target subtarget information.
+  const TargetSubtargetInfo &ST;
+
+  /// Virtual register information.
+  MachineRegisterInfo &MRI;
+
+  std::unique_ptr<DFAPacketizer> Resources; // DFA used to model processor resources.
+
+public:
+  SMSchedule(MachineFunction *mf)
+      : ST(mf->getSubtarget()), MRI(mf->getRegInfo()),
+        Resources(ST.getInstrInfo()->CreateTargetScheduleState(ST)) {}
+
+  void reset() { // Return the schedule to its empty initial state.
+    ScheduledInstrs.clear();
+    InstrToCycle.clear();
+    RegToStageDiff.clear();
+    FirstCycle = 0;
+    LastCycle = 0;
+    InitiationInterval = 0;
+  }
+
+  /// Set the initiation interval for this schedule.
+  void setInitiationInterval(int ii) { InitiationInterval = ii; }
+
+  /// Return the first cycle in the completed schedule. This
+  /// can be a negative value.
+  int getFirstCycle() const { return FirstCycle; }
+
+  /// Return the last cycle in the finalized schedule.
+  int getFinalCycle() const { return FirstCycle + InitiationInterval - 1; }
+
+  /// Return the cycle of the earliest scheduled instruction in the dependence
+  /// chain.
+  int earliestCycleInChain(const SDep &Dep);
+
+  /// Return the cycle of the latest scheduled instruction in the dependence
+  /// chain.
+  int latestCycleInChain(const SDep &Dep);
+
+  void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
+                    int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
+  bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);
+
+  /// Iterators for the cycle to instruction map.
+  using sched_iterator = DenseMap<int, std::deque<SUnit *>>::iterator;
+  using const_sched_iterator =
+      DenseMap<int, std::deque<SUnit *>>::const_iterator;
+
+  /// Return true if the instruction is scheduled at the specified stage.
+  bool isScheduledAtStage(SUnit *SU, unsigned StageNum) {
+    return (stageScheduled(SU) == (int)StageNum);
+  }
+
+  /// Return the stage for a scheduled instruction. Return -1 if
+  /// the instruction has not been scheduled.
+  int stageScheduled(SUnit *SU) const {
+    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
+    if (it == InstrToCycle.end())
+      return -1;
+    return (it->second - FirstCycle) / InitiationInterval;
+  }
+
+  /// Return the cycle for a scheduled instruction. This function normalizes
+  /// the first cycle to be 0.
+  unsigned cycleScheduled(SUnit *SU) const {
+    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
+    assert(it != InstrToCycle.end() && "Instruction hasn't been scheduled.");
+    return (it->second - FirstCycle) % InitiationInterval;
+  }
+
+  /// Return the maximum stage count needed for this schedule.
+  unsigned getMaxStageCount() {
+    return (LastCycle - FirstCycle) / InitiationInterval;
+  }
+
+  /// Return the max. number of stages/iterations that can occur between a
+  /// register definition and its uses.
+  unsigned getStagesForReg(int Reg, unsigned CurStage) {
+    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
+    if (CurStage > getMaxStageCount() && Stages.first == 0 && Stages.second)
+      return 1; // NOTE(review): Phi whose loop value is scheduled first; appears to need one stage — confirm.
+    return Stages.first;
+  }
+
+  /// The number of stages for a Phi is a little different than other
+  /// instructions. The minimum value computed in RegToStageDiff is 1
+  /// because we assume the Phi is needed for at least 1 iteration.
+  /// This is not the case if the loop value is scheduled prior to the
+  /// Phi in the same stage. This function returns the number of stages
+  /// or iterations needed between the Phi definition and any uses.
+  unsigned getStagesForPhi(int Reg) {
+    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
+    if (Stages.second)
+      return Stages.first;
+    return Stages.first - 1;
+  }
+
+  /// Return the instructions that are scheduled at the specified cycle.
+  std::deque<SUnit *> &getInstructions(int cycle) {
+    return ScheduledInstrs[cycle];
+  }
+
+  bool isValidSchedule(SwingSchedulerDAG *SSD);
+  void finalizeSchedule(SwingSchedulerDAG *SSD);
+  void orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
+                       std::deque<SUnit *> &Insts);
+  bool isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi);
+  bool isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Def,
+                             MachineOperand &MO);
+  void print(raw_ostream &os) const;
+  void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_MACHINEPIPELINER_H
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 5bf4a49c8b3b..fef010a23ef9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -689,15 +689,14 @@ public:
unsigned MinNumRegs = 0);
/// Constrain the register class or the register bank of the virtual register
- /// \p Reg to be a common subclass and a common bank of both registers
- /// provided respectively. Do nothing if any of the attributes (classes,
- /// banks, or low-level types) of the registers are deemed incompatible, or if
- /// the resulting register will have a class smaller than before and of size
- /// less than \p MinNumRegs. Return true if such register attributes exist,
- /// false otherwise.
+ /// \p Reg (and low-level type) to be a common subclass or a common bank of
+ /// both registers provided respectively (and a common low-level type). Do
+ /// nothing if any of the attributes (classes, banks, or low-level types) of
+ /// the registers are deemed incompatible, or if the resulting register will
+ /// have a class smaller than before and of size less than \p MinNumRegs.
+ /// Return true if such register attributes exist, false otherwise.
///
- /// \note Assumes that each register has either a low-level type or a class
- /// assigned, but not both. Use this method instead of constrainRegClass and
+ /// \note Use this method instead of constrainRegClass and
/// RegisterBankInfo::constrainGenericRegister everywhere but SelectionDAG
/// ISel / FastISel and GlobalISel's InstructionSelect pass respectively.
bool constrainRegAttrs(unsigned Reg, unsigned ConstrainingReg,
@@ -717,6 +716,10 @@ public:
unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name = "");
+ /// Create and return a new virtual register in the function with the same
+ /// attributes as the given register.
+ unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = "");
+
/// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
LLT getType(unsigned Reg) const {
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
index 85ffa4eda2b8..4bc31ae7c61a 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -132,17 +132,19 @@ struct MachineSchedContext {
/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
-class MachineSchedRegistry : public MachinePassRegistryNode {
+class MachineSchedRegistry
+ : public MachinePassRegistryNode<
+ ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
public:
using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);
// RegisterPassParser requires a (misnamed) FunctionPassCtor type.
using FunctionPassCtor = ScheduleDAGCtor;
- static MachinePassRegistry Registry;
+ static MachinePassRegistry<ScheduleDAGCtor> Registry;
MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
- : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+ : MachinePassRegistryNode(N, D, C) {
Registry.Add(this);
}
@@ -158,7 +160,7 @@ public:
return (MachineSchedRegistry *)Registry.getList();
}
- static void setListener(MachinePassRegistryListener *L) {
+ static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
Registry.setListener(L);
}
};
@@ -466,6 +468,9 @@ public:
PressureDiff &getPressureDiff(const SUnit *SU) {
return SUPressureDiffs[SU->NodeNum];
}
+ const PressureDiff &getPressureDiff(const SUnit *SU) const {
+ return SUPressureDiffs[SU->NodeNum];
+ }
/// Compute a DFSResult after DAG building is complete, and before any
/// queue comparisons.
@@ -491,6 +496,8 @@ public:
/// Compute the cyclic critical path through the DAG.
unsigned computeCyclicCriticalPath();
+ void dump() const override;
+
protected:
// Top-Level entry points for the schedule() driver...
@@ -787,7 +794,7 @@ public:
/// Represent the type of SchedCandidate found within a single queue.
/// pickNodeBidirectional depends on these listed by decreasing priority.
enum CandReason : uint8_t {
- NoCand, Only1, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak,
+ NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
@@ -895,6 +902,10 @@ protected:
#ifndef NDEBUG
void traceCandidate(const SchedCandidate &Cand);
#endif
+
+private:
+ bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
+ bool ComputeRemLatency, unsigned &RemLatency) const;
};
// Utility functions used by heuristics in tryCandidate().
@@ -917,7 +928,7 @@ bool tryPressure(const PressureChange &TryP,
const TargetRegisterInfo *TRI,
const MachineFunction &MF);
unsigned getWeakLeft(const SUnit *SU, bool isTop);
-int biasPhysRegCopy(const SUnit *SU, bool isTop);
+int biasPhysReg(const SUnit *SU, bool isTop);
/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
@@ -995,7 +1006,7 @@ protected:
const RegPressureTracker &RPTracker,
SchedCandidate &Candidate);
- void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+ void reschedulePhysReg(SUnit *SU, bool isTop);
};
/// PostGenericScheduler - Interface to the scheduling algorithm used by
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index cb12b14f4435..acf1ebb5bc83 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -379,14 +379,20 @@ namespace llvm {
///
FunctionPass *createInterleavedAccessPass();
+ /// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
+ /// combines them into wide loads detectable by InterleavedAccessPass
+ ///
+ FunctionPass *createInterleavedLoadCombinePass();
+
/// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
/// TLS variables for the emulated TLS model.
///
ModulePass *createLowerEmuTLSPass();
- /// This pass lowers the \@llvm.load.relative intrinsic to instructions.
- /// This is unsafe to do earlier because a pass may combine the constant
- /// initializer into the load, which may result in an overflowing evaluation.
+ /// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
+ /// instructions. This is unsafe to do earlier because a pass may combine the
+ /// constant initializer into the load, which may result in an overflowing
+ /// evaluation.
ModulePass *createPreISelIntrinsicLoweringPass();
/// GlobalMerge - This pass merges internal (by default) globals into structs
diff --git a/contrib/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h b/contrib/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
index 7a007eb8bcea..b7f83e515b7e 100644
--- a/contrib/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/PreISelIntrinsicLowering.h
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass implements IR lowering for the llvm.load.relative intrinsic.
+// This pass implements IR lowering for the llvm.load.relative and llvm.objc.*
+// intrinsics.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
diff --git a/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
index bdf0bb731540..f66191bc9fb4 100644
--- a/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
+++ b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
@@ -36,7 +36,7 @@ raw_ostream &operator<<(raw_ostream &OS, const PseudoSourceValue* PSV);
/// below the stack frame (e.g., argument space), or constant pool.
class PseudoSourceValue {
public:
- enum PSVKind {
+ enum PSVKind : unsigned {
Stack,
GOT,
JumpTable,
@@ -48,7 +48,7 @@ public:
};
private:
- PSVKind Kind;
+ unsigned Kind;
unsigned AddressSpace;
friend raw_ostream &llvm::operator<<(raw_ostream &OS,
const PseudoSourceValue* PSV);
@@ -60,11 +60,11 @@ private:
virtual void printCustom(raw_ostream &O) const;
public:
- explicit PseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+ explicit PseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII);
virtual ~PseudoSourceValue();
- PSVKind kind() const { return Kind; }
+ unsigned kind() const { return Kind; }
bool isStack() const { return Kind == Stack; }
bool isGOT() const { return Kind == GOT; }
@@ -116,7 +116,7 @@ public:
class CallEntryPseudoSourceValue : public PseudoSourceValue {
protected:
- CallEntryPseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+ CallEntryPseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII);
public:
bool isConstant(const MachineFrameInfo *) const override;
diff --git a/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h b/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h
index 481747dc163e..b518fbb9c9da 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h
@@ -26,14 +26,14 @@ class FunctionPass;
/// RegisterRegAlloc class - Track the registration of register allocators.
///
//===----------------------------------------------------------------------===//
-class RegisterRegAlloc : public MachinePassRegistryNode {
+class RegisterRegAlloc : public MachinePassRegistryNode<FunctionPass *(*)()> {
public:
using FunctionPassCtor = FunctionPass *(*)();
- static MachinePassRegistry Registry;
+ static MachinePassRegistry<FunctionPassCtor> Registry;
RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
- : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+ : MachinePassRegistryNode(N, D, C) {
Registry.Add(this);
}
@@ -48,15 +48,11 @@ public:
return (RegisterRegAlloc *)Registry.getList();
}
- static FunctionPassCtor getDefault() {
- return (FunctionPassCtor)Registry.getDefault();
- }
+ static FunctionPassCtor getDefault() { return Registry.getDefault(); }
- static void setDefault(FunctionPassCtor C) {
- Registry.setDefault((MachinePassCtor)C);
- }
+ static void setDefault(FunctionPassCtor C) { Registry.setDefault(C); }
- static void setListener(MachinePassRegistryListener *L) {
+ static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
Registry.setListener(L);
}
};
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterUsageInfo.h b/contrib/llvm/include/llvm/CodeGen/RegisterUsageInfo.h
index efd175eeed30..efecc61d9c30 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterUsageInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterUsageInfo.h
@@ -29,7 +29,7 @@
namespace llvm {
class Function;
-class TargetMachine;
+class LLVMTargetMachine;
class PhysicalRegisterUsageInfo : public ImmutablePass {
public:
@@ -41,7 +41,7 @@ public:
}
/// Set TargetMachine which is used to print analysis.
- void setTargetMachine(const TargetMachine &TM);
+ void setTargetMachine(const LLVMTargetMachine &TM);
bool doInitialization(Module &M) override;
@@ -63,7 +63,7 @@ private:
/// and 1 means content of register will be preserved around function call.
DenseMap<const Function *, std::vector<uint32_t>> RegMasks;
- const TargetMachine *TM;
+ const LLVMTargetMachine *TM;
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 56adc2e2fbfa..0870d67db390 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -33,15 +33,15 @@
namespace llvm {
template<class Graph> class GraphWriter;
+class LLVMTargetMachine;
class MachineFunction;
class MachineRegisterInfo;
class MCInstrDesc;
struct MCSchedClassDesc;
-class ScheduleDAG;
class SDNode;
class SUnit;
+class ScheduleDAG;
class TargetInstrInfo;
-class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -236,8 +236,7 @@ class TargetRegisterInfo;
Contents.Reg = Reg;
}
- raw_ostream &print(raw_ostream &O,
- const TargetRegisterInfo *TRI = nullptr) const;
+ void dump(const TargetRegisterInfo *TRI = nullptr) const;
};
template <>
@@ -459,12 +458,7 @@ class TargetRegisterInfo;
/// edge occurs first.
void biasCriticalPath();
- void dump(const ScheduleDAG *G) const;
- void dumpAll(const ScheduleDAG *G) const;
- raw_ostream &print(raw_ostream &O,
- const SUnit *Entry = nullptr,
- const SUnit *Exit = nullptr) const;
- raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
+ void dumpAttributes() const;
private:
void ComputeDepth();
@@ -564,7 +558,7 @@ class TargetRegisterInfo;
class ScheduleDAG {
public:
- const TargetMachine &TM; ///< Target processor
+ const LLVMTargetMachine &TM; ///< Target processor
const TargetInstrInfo *TII; ///< Target instruction information
const TargetRegisterInfo *TRI; ///< Target processor register info
MachineFunction &MF; ///< Machine function
@@ -597,7 +591,9 @@ class TargetRegisterInfo;
virtual void viewGraph(const Twine &Name, const Twine &Title);
virtual void viewGraph();
- virtual void dumpNode(const SUnit *SU) const = 0;
+ virtual void dumpNode(const SUnit &SU) const = 0;
+ virtual void dump() const = 0;
+ void dumpNodeName(const SUnit &SU) const;
/// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
@@ -614,6 +610,9 @@ class TargetRegisterInfo;
unsigned VerifyScheduledDAG(bool isBottomUp);
#endif
+ protected:
+ void dumpNodeAll(const SUnit &SU) const;
+
private:
/// Returns the MCInstrDesc of this SDNode or NULL.
const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 520a23846f6e..daad18125db9 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -327,7 +327,8 @@ namespace llvm {
/// whole MachineFunction. By default does nothing.
virtual void finalizeSchedule() {}
- void dumpNode(const SUnit *SU) const override;
+ void dumpNode(const SUnit &SU) const override;
+ void dump() const override;
/// Returns a label for a DAG node that points to an instruction.
std::string getGraphNodeLabel(const SUnit *SU) const override;
diff --git a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
index badf927d0e95..fbe559f25556 100644
--- a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
@@ -29,16 +29,19 @@ namespace llvm {
class ScheduleDAGSDNodes;
class SelectionDAGISel;
-class RegisterScheduler : public MachinePassRegistryNode {
+class RegisterScheduler
+ : public MachinePassRegistryNode<
+ ScheduleDAGSDNodes *(*)(SelectionDAGISel *, CodeGenOpt::Level)> {
public:
using FunctionPassCtor = ScheduleDAGSDNodes *(*)(SelectionDAGISel*,
CodeGenOpt::Level);
- static MachinePassRegistry Registry;
+ static MachinePassRegistry<FunctionPassCtor> Registry;
RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
- : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
- { Registry.Add(this); }
+ : MachinePassRegistryNode(N, D, C) {
+ Registry.Add(this);
+ }
~RegisterScheduler() { Registry.Remove(this); }
@@ -51,7 +54,7 @@ public:
return (RegisterScheduler *)Registry.getList();
}
- static void setListener(MachinePassRegistryListener *L) {
+ static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
Registry.setListener(L);
}
};
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 888f9425ff90..67fe87fc96af 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -28,7 +28,7 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
@@ -188,8 +188,8 @@ public:
return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
}
- ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
- DbgValMapType::iterator I = DbgValMap.find(Node);
+ ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
+ auto I = DbgValMap.find(Node);
if (I != DbgValMap.end())
return I->second;
return ArrayRef<SDDbgValue*>();
@@ -229,7 +229,7 @@ class SelectionDAG {
LLVMContext *Context;
CodeGenOpt::Level OptLevel;
- DivergenceAnalysis * DA = nullptr;
+ LegacyDivergenceAnalysis * DA = nullptr;
FunctionLoweringInfo * FLI = nullptr;
/// The function-level optimization remark emitter. Used to emit remarks
@@ -308,6 +308,9 @@ public:
: DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
+
+ private:
+ virtual void anchor();
};
/// When true, additional steps are taken to
@@ -382,7 +385,7 @@ public:
/// Prepare this SelectionDAG to process code in the given MachineFunction.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
- DivergenceAnalysis * Divergence);
+ LegacyDivergenceAnalysis * Divergence);
void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
FLI = FuncInfo;
@@ -471,7 +474,9 @@ public:
return Root;
}
+#ifndef NDEBUG
void VerifyDAGDiverence();
+#endif
/// This iterates over the nodes in the SelectionDAG, folding
/// certain types of nodes together, or eliminating superfluous nodes. The
@@ -784,24 +789,6 @@ public:
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
- /// Return an operation which will any-extend the low lanes of the operand
- /// into the specified vector type. For example,
- /// this can convert a v16i8 into a v4i32 by any-extending the low four
- /// lanes of the operand from i8 to i32.
- SDValue getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
-
- /// Return an operation which will sign extend the low lanes of the operand
- /// into the specified vector type. For example,
- /// this can convert a v16i8 into a v4i32 by sign extending the low four
- /// lanes of the operand from i8 to i32.
- SDValue getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
-
- /// Return an operation which will zero extend the low lanes of the operand
- /// into the specified vector type. For example,
- /// this can convert a v16i8 into a v4i32 by zero extending the low four
- /// lanes of the operand from i8 to i32.
- SDValue getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
-
/// Convert Op, which must be of integer type, to the integer type VT,
/// by using an extension appropriate for the target's
/// BooleanContent for type OpVT or truncating it.
@@ -945,41 +932,45 @@ public:
Type *SizeTy, unsigned ElemSz, bool isTailCall,
MachinePointerInfo DstPtrInfo);
- /// Helper function to make it easier to build SetCC's if you just
- /// have an ISD::CondCode instead of an SDValue.
- ///
+ /// Helper function to make it easier to build SetCC's if you just have an
+ /// ISD::CondCode instead of an SDValue.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
- "Cannot compare scalars to vectors");
+ "Cannot compare scalars to vectors");
assert(LHS.getValueType().isVector() == VT.isVector() &&
- "Cannot compare scalars to vectors");
+ "Cannot compare scalars to vectors");
assert(Cond != ISD::SETCC_INVALID &&
- "Cannot create a setCC of an invalid node.");
+ "Cannot create a setCC of an invalid node.");
return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
}
- /// Helper function to make it easier to build Select's if you just
- /// have operands and don't want to check for vector.
+ /// Helper function to make it easier to build Select's if you just have
+ /// operands and don't want to check for vector.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
SDValue RHS) {
assert(LHS.getValueType() == RHS.getValueType() &&
"Cannot use select on differing types");
assert(VT.isVector() == LHS.getValueType().isVector() &&
"Cannot mix vectors and scalars");
- return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
- Cond, LHS, RHS);
+ auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
+ return getNode(Opcode, DL, VT, Cond, LHS, RHS);
}
- /// Helper function to make it easier to build SelectCC's if you
- /// just have an ISD::CondCode instead of an SDValue.
- ///
+ /// Helper function to make it easier to build SelectCC's if you just have an
+ /// ISD::CondCode instead of an SDValue.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
SDValue False, ISD::CondCode Cond) {
- return getNode(ISD::SELECT_CC, DL, True.getValueType(),
- LHS, RHS, True, False, getCondCode(Cond));
+ return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
+ False, getCondCode(Cond));
}
+ /// Try to simplify a select/vselect into 1 of its operands or a constant.
+ SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
+
+ /// Try to simplify a shift into 1 of its operands or a constant.
+ SDValue simplifyShift(SDValue X, SDValue Y);
+
/// VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
@@ -1140,6 +1131,13 @@ public:
/// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
SDValue expandVACopy(SDNode *Node);
+ /// Returns a GlobalAddress of the function from the current module with
+ /// name matching the given ExternalSymbol. Additionally can provide the
+ /// matched function.
+ /// Panics if the function doesn't exist.
+ SDValue getSymbolFunctionGlobalAddress(SDValue Op,
+ Function **TargetFunction = nullptr);
+
/// *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
@@ -1156,6 +1154,11 @@ public:
SDValue Op3, SDValue Op4, SDValue Op5);
SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
+ /// *Mutate* the specified machine node's memory references to the provided
+ /// list.
+ void setNodeMemRefs(MachineSDNode *N,
+ ArrayRef<MachineMemOperand *> NewMemRefs);
+
// Propagates the change in divergence to users
void updateDivergence(SDNode * N);
@@ -1346,7 +1349,7 @@ public:
void AddDbgLabel(SDDbgLabel *DB);
/// Get the debug values which reference the given SDNode.
- ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
+ ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
return DbgInfo->getSDDbgValues(SD);
}
@@ -1429,15 +1432,15 @@ public:
/// every vector element.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
- void computeKnownBits(SDValue Op, KnownBits &Known, unsigned Depth = 0) const;
+ KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
- void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
- unsigned Depth = 0) const;
+ KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
+ unsigned Depth = 0) const;
/// Used to represent the possible overflow behavior of an operation.
/// Never: the operation cannot overflow.
@@ -1484,8 +1487,15 @@ public:
/// X|Cst == X+Cst iff X&Cst = 0.
bool isBaseWithConstantOffset(SDValue Op) const;
- /// Test whether the given SDValue is known to never be NaN.
- bool isKnownNeverNaN(SDValue Op) const;
+ /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
+ /// true, returns if \p Op is known to never be a signaling NaN (it may still
+ /// be a qNaN).
+ bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
+
+ /// \returns true if \p Op is known to never be a signaling NaN.
+ bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
+ return isKnownNeverNaN(Op, true, Depth);
+ }
/// Test whether the given floating point SDValue is known to never be
/// positive or negative zero.
@@ -1503,6 +1513,27 @@ public:
/// allow an 'add' to be transformed into an 'or'.
bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
+ /// Test whether \p V has a splatted value for all the demanded elements.
+ ///
+ /// On success \p UndefElts will indicate the elements that have UNDEF
+ /// values instead of the splat value, this is only guaranteed to be correct
+ /// for \p DemandedElts.
+ ///
+ /// NOTE: The function will return true for a demanded splat of UNDEF values.
+ bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts);
+
+ /// Test whether \p V has a splatted value.
+ bool isSplatValue(SDValue V, bool AllowUndefs = false);
+
+ /// Match a binop + shuffle pyramid that represents a horizontal reduction
+ /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node \p
+ /// Extract. The reduction must use one of the opcodes listed in \p
+ /// CandidateBinOps and on success \p BinOp will contain the matching opcode.
+ /// Returns the vector that is being reduced on, or SDValue() if a reduction
+ /// was not matched.
+ SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
+ ArrayRef<ISD::NodeType> CandidateBinOps);
+
/// Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
index 580606441a9d..2b2c48d57bc0 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -45,18 +45,21 @@ public:
IsIndexSignExt(IsIndexSignExt) {}
SDValue getBase() { return Base; }
+ SDValue getBase() const { return Base; }
SDValue getIndex() { return Index; }
+ SDValue getIndex() const { return Index; }
- bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG) {
+ bool equalBaseIndex(const BaseIndexOffset &Other,
+ const SelectionDAG &DAG) const {
int64_t Off;
return equalBaseIndex(Other, DAG, Off);
}
- bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG,
- int64_t &Off);
+ bool equalBaseIndex(const BaseIndexOffset &Other, const SelectionDAG &DAG,
+ int64_t &Off) const;
/// Parses tree in Ptr for base, index, offset addresses.
- static BaseIndexOffset match(LSBaseSDNode *N, const SelectionDAG &DAG);
+ static BaseIndexOffset match(const LSBaseSDNode *N, const SelectionDAG &DAG);
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index 86df0af7303f..6758c55c696a 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -132,6 +132,7 @@ public:
OPC_CheckChild2Same, OPC_CheckChild3Same,
OPC_CheckPatternPredicate,
OPC_CheckPredicate,
+ OPC_CheckPredicateWithOperands,
OPC_CheckOpcode,
OPC_SwitchOpcode,
OPC_CheckType,
@@ -267,6 +268,17 @@ public:
llvm_unreachable("Tblgen should generate the implementation of this!");
}
+ /// CheckNodePredicateWithOperands - This function is generated by tblgen in
+ /// the target.
+ /// It runs node predicate number PredNo and returns true if it succeeds or
+ /// false if it fails. The number is a private implementation detail to the
+ /// code tblgen produces.
+ virtual bool CheckNodePredicateWithOperands(
+ SDNode *N, unsigned PredNo,
+ const SmallVectorImpl<SDValue> &Operands) const {
+ llvm_unreachable("Tblgen should generate the implementation of this!");
+ }
+
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
unsigned PatternNo,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 1af22185d366..10f284179084 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -672,6 +672,12 @@ public:
case ISD::STRICT_FLOG2:
case ISD::STRICT_FRINT:
case ISD::STRICT_FNEARBYINT:
+ case ISD::STRICT_FMAXNUM:
+ case ISD::STRICT_FMINNUM:
+ case ISD::STRICT_FCEIL:
+ case ISD::STRICT_FFLOOR:
+ case ISD::STRICT_FROUND:
+ case ISD::STRICT_FTRUNC:
return true;
}
}
@@ -1589,15 +1595,38 @@ bool isAllOnesConstant(SDValue V);
/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);
+/// Return the non-bitcasted source operand of \p V if it exists.
+/// If \p V is not a bitcasted value, it is returned as-is.
+SDValue peekThroughBitcasts(SDValue V);
+
+/// Return the non-bitcasted and one-use source operand of \p V if it exists.
+/// If \p V is not a bitcasted one-use value, it is returned as-is.
+SDValue peekThroughOneUseBitcasts(SDValue V);
+
/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V);
/// Returns the SDNode if it is a constant splat BuildVector or constant int.
-ConstantSDNode *isConstOrConstSplat(SDValue N);
+ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false);
/// Returns the SDNode if it is a constant splat BuildVector or constant float.
-ConstantFPSDNode *isConstOrConstSplatFP(SDValue N);
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
+
+/// Return true if the value is a constant 0 integer or a splatted vector of
+/// a constant 0 integer (with no undefs).
+/// Build vector implicit truncation is not an issue for null values.
+bool isNullOrNullSplat(SDValue V);
+
+/// Return true if the value is a constant 1 integer or a splatted vector of a
+/// constant 1 integer (with no undefs).
+/// Does not permit build vector implicit truncation.
+bool isOneOrOneSplat(SDValue V);
+
+/// Return true if the value is a constant -1 integer or a splatted vector of a
+/// constant -1 integer (with no undefs).
+/// Does not permit build vector implicit truncation.
+bool isAllOnesOrAllOnesSplat(SDValue V);
class GlobalAddressSDNode : public SDNode {
friend class SelectionDAG;
@@ -2113,12 +2142,15 @@ public:
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
- // In the both nodes address is Op1, mask is Op2:
- // MaskedLoadSDNode (Chain, ptr, mask, src0), src0 is a passthru value
- // MaskedStoreSDNode (Chain, ptr, mask, data)
+ // MaskedLoadSDNode (Chain, ptr, mask, passthru)
+ // MaskedStoreSDNode (Chain, data, ptr, mask)
// Mask is a vector of i1 elements
- const SDValue &getBasePtr() const { return getOperand(1); }
- const SDValue &getMask() const { return getOperand(2); }
+ const SDValue &getBasePtr() const {
+ return getOperand(getOpcode() == ISD::MLOAD ? 1 : 2);
+ }
+ const SDValue &getMask() const {
+ return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
+ }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD ||
@@ -2143,7 +2175,10 @@ public:
return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
}
- const SDValue &getSrc0() const { return getOperand(3); }
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getMask() const { return getOperand(2); }
+ const SDValue &getPassThru() const { return getOperand(3); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD;
}
@@ -2175,7 +2210,9 @@ public:
/// memory at base_addr.
bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
- const SDValue &getValue() const { return getOperand(3); }
+ const SDValue &getValue() const { return getOperand(1); }
+ const SDValue &getBasePtr() const { return getOperand(2); }
+ const SDValue &getMask() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSTORE;
@@ -2201,7 +2238,6 @@ public:
const SDValue &getBasePtr() const { return getOperand(3); }
const SDValue &getIndex() const { return getOperand(4); }
const SDValue &getMask() const { return getOperand(2); }
- const SDValue &getValue() const { return getOperand(1); }
const SDValue &getScale() const { return getOperand(5); }
static bool classof(const SDNode *N) {
@@ -2220,6 +2256,8 @@ public:
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
+ const SDValue &getPassThru() const { return getOperand(1); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MGATHER;
}
@@ -2235,6 +2273,8 @@ public:
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
+ const SDValue &getValue() const { return getOperand(1); }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MSCATTER;
}
@@ -2243,32 +2283,60 @@ public:
/// An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
+///
+/// Note that the only supported way to set the `memoperands` is by calling the
+/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
+/// inside the DAG rather than in the node.
class MachineSDNode : public SDNode {
-public:
- using mmo_iterator = MachineMemOperand **;
-
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs) {}
- /// Memory reference descriptions for this instruction.
- mmo_iterator MemRefs = nullptr;
- mmo_iterator MemRefsEnd = nullptr;
+ // We use a pointer union between a single `MachineMemOperand` pointer and
+ // a pointer to an array of `MachineMemOperand` pointers. This is null when
+ // the number of these is zero, the single pointer variant used when the
+ // number is one, and the array is used for larger numbers.
+ //
+ // The array is allocated via the `SelectionDAG`'s allocator and so will
+ // always live until the DAG is cleaned up and doesn't require ownership here.
+ //
+ // We can't use something simpler like `TinyPtrVector` here because `SDNode`
+ // subclasses aren't managed in a conforming C++ manner. See the comments on
+ // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
+ // constraint here is that these don't manage memory with their constructor or
+ // destructor and can be initialized to a good state even if they start off
+ // uninitialized.
+ PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
+
+ // Note that this could be folded into the above `MemRefs` member if doing so
+ // is advantageous at some point. We don't need to store this in most cases.
+ // However, at the moment this doesn't appear to make the allocation any
+ // smaller and makes the code somewhat simpler to read.
+ int NumMemRefs = 0;
public:
- mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefsEnd; }
- bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+ using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
+
+ ArrayRef<MachineMemOperand *> memoperands() const {
+ // Special case the common cases.
+ if (NumMemRefs == 0)
+ return {};
+ if (NumMemRefs == 1)
+ return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
+
+ // Otherwise we have an actual array.
+ return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
+ }
+ mmo_iterator memoperands_begin() const { return memoperands().begin(); }
+ mmo_iterator memoperands_end() const { return memoperands().end(); }
+ bool memoperands_empty() const { return memoperands().empty(); }
- /// Assign this MachineSDNodes's memory reference descriptor
- /// list. This does not transfer ownership.
- void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
- assert(*MMI && "Null mem ref detected!");
- MemRefs = NewMemRefs;
- MemRefsEnd = NewMemRefsEnd;
+ /// Clear out the memory reference descriptor list.
+ void clearMemRefs() {
+ MemRefs = nullptr;
+ NumMemRefs = 0;
}
static bool classof(const SDNode *N) {
@@ -2405,17 +2473,32 @@ namespace ISD {
cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
+ /// Return true if the node is a math/logic binary operator. This corresponds
+ /// to the IR function of the same name.
+ inline bool isBinaryOp(const SDNode *N) {
+ auto Op = N->getOpcode();
+ return (Op == ISD::ADD || Op == ISD::SUB || Op == ISD::MUL ||
+ Op == ISD::AND || Op == ISD::OR || Op == ISD::XOR ||
+ Op == ISD::SHL || Op == ISD::SRL || Op == ISD::SRA ||
+ Op == ISD::SDIV || Op == ISD::UDIV || Op == ISD::SREM ||
+ Op == ISD::UREM || Op == ISD::FADD || Op == ISD::FSUB ||
+ Op == ISD::FMUL || Op == ISD::FDIV || Op == ISD::FREM);
+ }
+
/// Attempt to match a unary predicate against a scalar/splat constant or
/// every element of a constant BUILD_VECTOR.
+ /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
bool matchUnaryPredicate(SDValue Op,
- std::function<bool(ConstantSDNode *)> Match);
+ std::function<bool(ConstantSDNode *)> Match,
+ bool AllowUndefs = false);
/// Attempt to match a binary predicate against a pair of scalar/splat
/// constants or every element of a pair of constant BUILD_VECTORs.
+ /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
bool matchBinaryPredicate(
SDValue LHS, SDValue RHS,
- std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match);
-
+ std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
+ bool AllowUndefs = false);
} // end namespace ISD
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
index 334267d9828b..8c8a7be459fd 100644
--- a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -413,8 +413,14 @@ class raw_ostream;
/// Returns the base index for the given instruction.
SlotIndex getInstructionIndex(const MachineInstr &MI) const {
// Instructions inside a bundle have the same number as the bundle itself.
- const MachineInstr &BundleStart = *getBundleStart(MI.getIterator());
- Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleStart);
+ auto BundleStart = getBundleStart(MI.getIterator());
+ auto BundleEnd = getBundleEnd(MI.getIterator());
+ // Use the first non-debug instruction in the bundle to get SlotIndex.
+ const MachineInstr &BundleNonDebug =
+ *skipDebugInstructionsForward(BundleStart, BundleEnd);
+ assert(!BundleNonDebug.isDebugInstr() &&
+ "Could not use a debug instruction to query mi2iMap.");
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleNonDebug);
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
}
@@ -442,7 +448,7 @@ class raw_ostream;
/// MI is not required to have an index.
SlotIndex getIndexBefore(const MachineInstr &MI) const {
const MachineBasicBlock *MBB = MI.getParent();
- assert(MBB && "MI must be inserted inna basic block");
+ assert(MBB && "MI must be inserted in a basic block");
MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
while (true) {
if (I == B)
@@ -459,7 +465,7 @@ class raw_ostream;
/// MI is not required to have an index.
SlotIndex getIndexAfter(const MachineInstr &MI) const {
const MachineBasicBlock *MBB = MI.getParent();
- assert(MBB && "MI must be inserted inna basic block");
+ assert(MBB && "MI must be inserted in a basic block");
MachineBasicBlock::const_iterator I = MI, E = MBB->end();
while (true) {
++I;
@@ -674,7 +680,7 @@ class raw_ostream;
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
renumberIndexes(newItr);
- llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ llvm::sort(idx2MBBMap, Idx2MBBCompare());
}
/// Free the resources that were required to maintain a SlotIndex.
diff --git a/contrib/llvm/include/llvm/CodeGen/StackMaps.h b/contrib/llvm/include/llvm/CodeGen/StackMaps.h
index e584a4136e4f..8be9ae378557 100644
--- a/contrib/llvm/include/llvm/CodeGen/StackMaps.h
+++ b/contrib/llvm/include/llvm/CodeGen/StackMaps.h
@@ -236,25 +236,6 @@ public:
FnInfos.clear();
}
- /// Generate a stackmap record for a stackmap instruction.
- ///
- /// MI must be a raw STACKMAP, not a PATCHPOINT.
- void recordStackMap(const MachineInstr &MI);
-
- /// Generate a stackmap record for a patchpoint instruction.
- void recordPatchPoint(const MachineInstr &MI);
-
- /// Generate a stackmap record for a statepoint instruction.
- void recordStatepoint(const MachineInstr &MI);
-
- /// If there is any stack map data, create a stack map section and serialize
- /// the map info into it. This clears the stack map data structures
- /// afterwards.
- void serializeToStackMapSection();
-
-private:
- static const char *WSMP;
-
using LocationVec = SmallVector<Location, 8>;
using LiveOutVec = SmallVector<LiveOutReg, 8>;
using ConstantPool = MapVector<uint64_t, uint64_t>;
@@ -283,6 +264,31 @@ private:
using FnInfoMap = MapVector<const MCSymbol *, FunctionInfo>;
using CallsiteInfoList = std::vector<CallsiteInfo>;
+ /// Generate a stackmap record for a stackmap instruction.
+ ///
+ /// MI must be a raw STACKMAP, not a PATCHPOINT.
+ void recordStackMap(const MachineInstr &MI);
+
+ /// Generate a stackmap record for a patchpoint instruction.
+ void recordPatchPoint(const MachineInstr &MI);
+
+ /// Generate a stackmap record for a statepoint instruction.
+ void recordStatepoint(const MachineInstr &MI);
+
+ /// If there is any stack map data, create a stack map section and serialize
+ /// the map info into it. This clears the stack map data structures
+ /// afterwards.
+ void serializeToStackMapSection();
+
+ /// Get call site info.
+ CallsiteInfoList &getCSInfos() { return CSInfos; }
+
+ /// Get function info.
+ FnInfoMap &getFnInfos() { return FnInfos; }
+
+private:
+ static const char *WSMP;
+
AsmPrinter &AP;
CallsiteInfoList CSInfos;
ConstantPool ConstPool;
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/contrib/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index f8effee998e3..b4d1da941433 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -207,8 +207,11 @@ public:
return false;
}
- /// Return true if the target needs to disable frame pointer elimination.
- virtual bool noFramePointerElim(const MachineFunction &MF) const;
+ /// Return true if the target wants to keep the frame pointer regardless of
+ /// the function attribute "frame-pointer".
+ virtual bool keepFramePointer(const MachineFunction &MF) const {
+ return false;
+ }
/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. For most targets this is true only if the function
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/contrib/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index b5bc561d834c..961b90e9bc12 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -246,14 +246,14 @@ public:
}
/// If the specified machine instruction has a load from a stack slot,
- /// return true along with the FrameIndex of the loaded stack slot and the
- /// machine mem operand containing the reference.
+ /// return true along with the FrameIndices of the loaded stack slot and the
+ /// machine mem operands containing the reference.
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instructions that loads from the stack. This is just a hint, as some
/// cases may be missed.
- virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
+ virtual bool hasLoadFromStackSlot(
+ const MachineInstr &MI,
+ SmallVectorImpl<const MachineMemOperand *> &Accesses) const;
/// If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
@@ -284,14 +284,14 @@ public:
}
/// If the specified machine instruction has a store to a stack slot,
- /// return true along with the FrameIndex of the loaded stack slot and the
- /// machine mem operand containing the reference.
+ /// return true along with the FrameIndices of the loaded stack slot and the
+ /// machine mem operands containing the reference.
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instructions that stores to the
/// stack. This is just a hint, as some cases may be missed.
- virtual bool hasStoreToStackSlot(const MachineInstr &MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
+ virtual bool hasStoreToStackSlot(
+ const MachineInstr &MI,
+ SmallVectorImpl<const MachineMemOperand *> &Accesses) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
@@ -846,15 +846,33 @@ public:
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
}
+protected:
+ /// Target-dependent implementation for IsCopyInstr.
/// If the specific machine instruction is a instruction that moves/copies
/// value from one register to another register return true along with
/// @Source machine operand and @Destination machine operand.
- virtual bool isCopyInstr(const MachineInstr &MI,
- const MachineOperand *&SourceOpNum,
- const MachineOperand *&Destination) const {
+ virtual bool isCopyInstrImpl(const MachineInstr &MI,
+ const MachineOperand *&Source,
+ const MachineOperand *&Destination) const {
return false;
}
+public:
+ /// If the specific machine instruction is an instruction that moves/copies
+ /// value from one register to another register return true along with
+ /// @Source machine operand and @Destination machine operand.
+ /// For COPY-instruction the method naturally returns true, for all other
+ /// instructions the method calls target-dependent implementation.
+ bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source,
+ const MachineOperand *&Destination) const {
+ if (MI.isCopy()) {
+ Destination = &MI.getOperand(0);
+ Source = &MI.getOperand(1);
+ return true;
+ }
+ return isCopyInstrImpl(MI, Source, Destination);
+ }
+
/// Store the specified register of the given register class to the specified
/// stack frame index. The store instruction is to be added to the given
/// machine basic block before the specified machine instruction. If isKill
@@ -1063,7 +1081,7 @@ public:
/// getAddressSpaceForPseudoSourceKind - Given the kind of memory
/// (e.g. stack) the target returns the corresponding address space.
virtual unsigned
- getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const {
+ getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
return 0;
}
@@ -1118,11 +1136,11 @@ public:
return false;
}
- /// Get the base register and byte offset of an instruction that reads/writes
+ /// Get the base operand and byte offset of an instruction that reads/writes
/// memory.
- virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
- int64_t &Offset,
- const TargetRegisterInfo *TRI) const {
+ virtual bool getMemOperandWithOffset(MachineInstr &MI,
+ MachineOperand *&BaseOp, int64_t &Offset,
+ const TargetRegisterInfo *TRI) const {
return false;
}
@@ -1146,8 +1164,8 @@ public:
/// or
/// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
/// to TargetPassConfig::createMachineScheduler() to have an effect.
- virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
- MachineInstr &SecondLdSt, unsigned BaseReg2,
+ virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
+ MachineOperand &BaseOp2,
unsigned NumLoads) const {
llvm_unreachable("target did not implement shouldClusterMemOps()");
}
@@ -1617,10 +1635,11 @@ public:
"Target didn't implement TargetInstrInfo::getOutliningType!");
}
- /// Returns target-defined flags defining properties of the MBB for
- /// the outliner.
- virtual unsigned getMachineOutlinerMBBFlags(MachineBasicBlock &MBB) const {
- return 0x0;
+ /// Optional target hook that returns true if \p MBB is safe to outline from,
+ /// and returns any target-specific information in \p Flags.
+ virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
+ unsigned &Flags) const {
+ return true;
}
/// Insert a custom frame for outlined functions.
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLowering.h b/contrib/llvm/include/llvm/CodeGen/TargetLowering.h
index 40540bd6e1ff..23dbaac03ebe 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -29,7 +29,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -163,6 +163,7 @@ public:
LLOnly, // Expand the (load) instruction into just a load-linked, which has
// greater atomic guarantees than a normal load.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
};
/// Enum that specifies when a multiplication should be expanded.
@@ -268,6 +269,14 @@ public:
return true;
}
+ /// Return true if it is profitable to convert a select of FP constants into
+ /// a constant pool load whose address depends on the select condition. The
+ /// parameter may be used to differentiate a select with FP compare from
+ /// integer compare.
+ virtual bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
+ return true;
+ }
+
/// Return true if multiple condition registers are available.
bool hasMultipleConditionRegisters() const {
return HasMultipleConditionRegisters;
@@ -278,7 +287,7 @@ public:
/// Return the preferred vector type legalization action.
virtual TargetLoweringBase::LegalizeTypeAction
- getPreferredVectorAction(EVT VT) const {
+ getPreferredVectorAction(MVT VT) const {
// The default action for one element vectors is to scalarize
if (VT.getVectorNumElements() == 1)
return TypeScalarizeVector;
@@ -545,6 +554,12 @@ public:
return false;
}
+ /// Return true if inserting a scalar into a variable element of an undef
+ /// vector is more efficiently handled by splatting the scalar instead.
+ virtual bool shouldSplatInsEltVarIndex(EVT) const {
+ return false;
+ }
+
/// Return true if target supports floating point exceptions.
bool hasFloatingPointExceptions() const {
return HasFloatingPointExceptions;
@@ -790,6 +805,38 @@ public:
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
}
+ /// Custom method defined by each target to indicate if an operation which
+ /// may require a scale is supported natively by the target.
+ /// If not, the operation is illegal.
+ virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
+ unsigned Scale) const {
+ return false;
+ }
+
+ /// Some fixed point operations may be natively supported by the target but
+ /// only for specific scales. This method allows for checking
+ /// if the width is supported by the target for a given operation that may
+ /// depend on scale.
+ LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
+ unsigned Scale) const {
+ auto Action = getOperationAction(Op, VT);
+ if (Action != Legal)
+ return Action;
+
+ // This operation is supported in this type but may only work on specific
+ // scales.
+ bool Supported;
+ switch (Op) {
+ default:
+ llvm_unreachable("Unexpected fixed point operation.");
+ case ISD::SMULFIX:
+ Supported = isSupportedFixedPointOperation(Op, VT, Scale);
+ break;
+ }
+
+ return Supported ? Action : Expand;
+ }
+
LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
unsigned EqOpc;
switch (Op) {
@@ -798,6 +845,7 @@ public:
case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
+ case ISD::STRICT_FREM: EqOpc = ISD::FREM; break;
case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
@@ -811,6 +859,12 @@ public:
case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
+ case ISD::STRICT_FMAXNUM: EqOpc = ISD::FMAXNUM; break;
+ case ISD::STRICT_FMINNUM: EqOpc = ISD::FMINNUM; break;
+ case ISD::STRICT_FCEIL: EqOpc = ISD::FCEIL; break;
+ case ISD::STRICT_FFLOOR: EqOpc = ISD::FFLOOR; break;
+ case ISD::STRICT_FROUND: EqOpc = ISD::FROUND; break;
+ case ISD::STRICT_FTRUNC: EqOpc = ISD::FTRUNC; break;
}
auto Action = getOperationAction(EqOpc, VT);
@@ -1199,13 +1253,15 @@ public:
/// reduce runtime.
virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
- // Return true if it is profitable to reduce the given load node to a smaller
- // type.
- //
- // e.g. (i16 (trunc (i32 (load x))) -> i16 load x should be performed
- virtual bool shouldReduceLoadWidth(SDNode *Load,
- ISD::LoadExtType ExtTy,
+ /// Return true if it is profitable to reduce a load to a smaller type.
+ /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
+ virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
EVT NewVT) const {
+ // By default, assume that it is cheaper to extract a subvector from a wide
+ // vector load rather than creating multiple narrow vector loads.
+ if (NewVT.isVector() && !Load->hasOneUse())
+ return false;
+
return true;
}
@@ -1428,6 +1484,12 @@ public:
return PrefLoopAlignment;
}
+ /// Should loops be aligned even when the function is marked OptSize (but not
+ /// MinSize).
+ virtual bool alignLoopsWithOptSize() const {
+ return false;
+ }
+
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
/// DEPRECATED: please override useLoadStackGuardNode and customize
@@ -1549,6 +1611,26 @@ public:
llvm_unreachable("Store conditional unimplemented on this target");
}
+ /// Perform a masked atomicrmw using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
+ AtomicRMWInst *AI,
+ Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
+ }
+
+ /// Perform a masked cmpxchg using a target-specific intrinsic. This
+ /// represents the core LL/SC loop which will be lowered at a late stage by
+ /// the backend.
+ virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
+ }
+
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
/// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
@@ -1625,11 +1707,11 @@ public:
return AtomicExpansionKind::None;
}
- /// Returns true if the given atomic cmpxchg should be expanded by the
- /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
- /// (through emitLoadLinked() and emitStoreConditional()).
- virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
- return false;
+ /// Returns how the given atomic cmpxchg should be expanded by the IR-level
+ /// AtomicExpand pass.
+ virtual AtomicExpansionKind
+ shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+ return AtomicExpansionKind::None;
}
/// Returns how the IR-level AtomicExpand pass should expand the given
@@ -1687,6 +1769,25 @@ public:
return false;
}
+ /// Return true if it is profitable to transform an integer
+ /// multiplication-by-constant into simpler operations like shifts and adds.
+ /// This may be true if the target does not directly support the
+ /// multiplication operation for the specified type or the sequence of simpler
+ /// ops is faster than the multiply.
+ virtual bool decomposeMulByConstant(EVT VT, SDValue C) const {
+ return false;
+ }
+
+ /// Return true if it is more correct/profitable to use strict FP_TO_INT
+ /// conversion operations - canonicalizing the FP source value instead of
+ /// converting all cases and then selecting based on value.
+ /// This may be true if the target throws exceptions for out of bounds
+ /// conversions or has fast FP CMOV.
+ virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
+ bool IsSigned) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -2015,6 +2116,14 @@ public:
return true;
}
+ /// Return true if the specified immediate is legal for the value input of a
+ /// store instruction.
+ virtual bool isLegalStoreImmediate(int64_t Value) const {
+ // Default implementation assumes that at least 0 works since it is likely
+ // that a zero register exists or a zero immediate is allowed.
+ return Value == 0;
+ }
+
/// Return true if it's significantly cheaper to shift a vector by a uniform
/// scalar than by an amount which will vary across each lane. On x86, for
/// example, there is a "psllw" instruction for the former case, but no simple
@@ -2046,10 +2155,12 @@ public:
case ISD::UADDO:
case ISD::ADDC:
case ISD::ADDE:
+ case ISD::SADDSAT:
+ case ISD::UADDSAT:
case ISD::FMINNUM:
case ISD::FMAXNUM:
- case ISD::FMINNAN:
- case ISD::FMAXNAN:
+ case ISD::FMINIMUM:
+ case ISD::FMAXIMUM:
return true;
default: return false;
}
@@ -2153,6 +2264,12 @@ public:
return false;
}
+ /// Return true if sign-extension from FromTy to ToTy is cheaper than
+ /// zero-extension.
+ virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
+ return false;
+ }
+
/// Return true if the target supplies and combines to a paired load
/// two loaded values of type LoadedType next to each other in memory.
/// RequiredAlignment gives the minimal alignment constraints that must be met
@@ -2292,6 +2409,12 @@ public:
return false;
}
+ /// Try to convert an extract element of a vector binary operation into an
+ /// extract element followed by a scalar operation.
+ virtual bool shouldScalarizeBinop(SDValue VecOp) const {
+ return false;
+ }
+
// Return true if it is profitable to use a scalar input to a BUILD_VECTOR
// even if the vector itself has multiple uses.
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
@@ -2648,7 +2771,7 @@ public:
virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
FunctionLoweringInfo *FLI,
- DivergenceAnalysis *DA) const {
+ LegacyDivergenceAnalysis *DA) const {
return false;
}
@@ -2774,36 +2897,33 @@ public:
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
TargetLoweringOpt &TLO) const;
- /// Helper for SimplifyDemandedBits that can simplify an operation with
- /// multiple uses. This function simplifies operand \p OpIdx of \p User and
- /// then updates \p User with the simplified version. No other uses of
- /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
- /// function behaves exactly like function SimplifyDemandedBits declared
- /// below except that it also updates the DAG by calling
- /// DCI.CommitTargetLoweringOpt.
- bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
- DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
-
- /// Look at Op. At this point, we know that only the DemandedMask bits of the
+ /// Look at Op. At this point, we know that only the DemandedBits bits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning
/// the original and new nodes in Old and New. Otherwise, analyze the
/// expression and return a mask of KnownOne and KnownZero bits for the
/// expression (used to simplify the caller). The KnownZero/One bits may only
- /// be accurate for those bits in the DemandedMask.
+ /// be accurate for those bits in the Demanded masks.
/// \p AssumeSingleUse When this parameter is true, this function will
/// attempt to simplify \p Op even if there are multiple uses.
/// Callers are responsible for correctly updating the DAG based on the
/// results of this function, because simply replacing replacing TLO.Old
/// with TLO.New will be incorrect when this parameter is true and TLO.Old
/// has multiple uses.
- bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
- KnownBits &Known,
- TargetLoweringOpt &TLO,
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
+ const APInt &DemandedElts, KnownBits &Known,
+ TargetLoweringOpt &TLO, unsigned Depth = 0,
+ bool AssumeSingleUse = false) const;
+
+ /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
+ /// Adds Op back to the worklist upon success.
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
+ KnownBits &Known, TargetLoweringOpt &TLO,
unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedBits
+ /// Helper wrapper around SimplifyDemandedBits.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
DAGCombinerInfo &DCI) const;
@@ -2826,7 +2946,8 @@ public:
TargetLoweringOpt &TLO, unsigned Depth = 0,
bool AssumeSingleUse = false) const;
- /// Helper wrapper around SimplifyDemandedVectorElts
+ /// Helper wrapper around SimplifyDemandedVectorElts.
+ /// Adds Op back to the worklist upon success.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
APInt &KnownUndef, APInt &KnownZero,
DAGCombinerInfo &DCI) const;
@@ -2863,11 +2984,30 @@ public:
/// elements, returning true on success. Otherwise, analyze the expression and
/// return a mask of KnownUndef and KnownZero elements for the expression
/// (used to simplify the caller). The KnownUndef/Zero elements may only be
- /// accurate for those bits in the DemandedMask
+ /// accurate for those bits in the DemandedMask.
virtual bool SimplifyDemandedVectorEltsForTargetNode(
SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
+ /// Attempt to simplify any target nodes based on the demanded bits/elts,
+ /// returning true on success. Otherwise, analyze the
+ /// expression and return a mask of KnownOne and KnownZero bits for the
+ /// expression (used to simplify the caller). The KnownZero/One bits may only
+ /// be accurate for those bits in the Demanded masks.
+ virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
+ const APInt &DemandedBits,
+ const APInt &DemandedElts,
+ KnownBits &Known,
+ TargetLoweringOpt &TLO,
+ unsigned Depth = 0) const;
+
+ /// If \p SNaN is false, \returns true if \p Op is known to never be any
+ /// NaN. If \p sNaN is true, returns if \p Op is known to never be a signaling
+ /// NaN.
+ virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
+ bool SNaN = false,
+ unsigned Depth = 0) const;
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
CombineLevel Level;
@@ -2935,12 +3075,25 @@ public:
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- /// Return true if it is profitable to move a following shift through this
- // node, adjusting any immediate operands as necessary to preserve semantics.
- // This transformation may not be desirable if it disrupts a particularly
- // auspicious target-specific tree (e.g. bitfield extraction in AArch64).
- // By default, it returns true.
- virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+ /// Return true if it is profitable to move this shift by a constant amount
+ /// though its operand, adjusting any immediate operands as necessary to
+ /// preserve semantics. This transformation may not be desirable if it
+ /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
+ /// extraction in AArch64). By default, it returns true.
+ ///
+ /// @param N the shift node
+ /// @param Level the current DAGCombine legalization level.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const {
+ return true;
+ }
+
+ /// Return true if it is profitable to fold a pair of shifts into a mask.
+ /// This is usually true on most targets. But some targets, like Thumb1,
+ /// have immediate shift instructions, but no immediate "and" instruction;
+ /// this makes the fold unprofitable.
+ virtual bool shouldFoldShiftPairToMask(const SDNode *N,
+ CombineLevel Level) const {
return true;
}
@@ -3488,11 +3641,9 @@ public:
//===--------------------------------------------------------------------===//
// Div utility functions
//
- SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
- bool IsAfterLegalization,
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
SmallVectorImpl<SDNode *> &Created) const;
- SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
- bool IsAfterLegalization,
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
SmallVectorImpl<SDNode *> &Created) const;
/// Targets may override this function to provide custom SDIV lowering for
@@ -3584,12 +3735,68 @@ public:
SDValue LL = SDValue(), SDValue LH = SDValue(),
SDValue RL = SDValue(), SDValue RH = SDValue()) const;
+ /// Expand funnel shift.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand rotations.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandROT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
/// Expand float(f32) to SINT(i64) conversion
/// \param N Node to expand
/// \param Result output after conversion
/// \returns True, if the expansion was successful, false otherwise
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+ /// Expand float to UINT conversion
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand UINT(i64) to double(f64) conversion
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
+ SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
+
+ /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
+ /// vector nodes can only succeed if all operations are legal/custom.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
+ /// vector nodes can only succeed if all operations are legal/custom.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
+ /// vector nodes can only succeed if all operations are legal/custom.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ /// Expand ABS nodes. Expands vector/scalar ABS nodes,
+ /// vector nodes can only succeed if all operations are legal/custom.
+ /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns True, if the expansion was successful, false otherwise
+ bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
/// Turn load of vector type into a load of the individual elements.
/// \param LD load to expand
/// \returns MERGE_VALUEs of the scalar loads with their chains.
@@ -3627,6 +3834,15 @@ public:
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
SDValue Index) const;
+ /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
+ /// method accepts integers as its arguments.
+ SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
+
+ /// Method for building the DAG expansion of ISD::SMULFIX. This method accepts
+ /// integers as its arguments.
+ SDValue getExpandedFixedPointMultiplication(SDNode *Node,
+ SelectionDAG &DAG) const;
+
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index f5c7fc824ab4..052d1f8bc686 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -90,6 +90,8 @@ public:
const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
const GlobalValue *RHS,
const TargetMachine &TM) const override;
+
+ MCSection *getSectionForCommandLines() const override;
};
class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetPassConfig.h b/contrib/llvm/include/llvm/CodeGen/TargetPassConfig.h
index 8f5c9cb8c3fa..3288711a335d 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetPassConfig.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetPassConfig.h
@@ -90,6 +90,19 @@ private:
AnalysisID StartAfter = nullptr;
AnalysisID StopBefore = nullptr;
AnalysisID StopAfter = nullptr;
+
+ unsigned StartBeforeInstanceNum = 0;
+ unsigned StartBeforeCount = 0;
+
+ unsigned StartAfterInstanceNum = 0;
+ unsigned StartAfterCount = 0;
+
+ unsigned StopBeforeInstanceNum = 0;
+ unsigned StopBeforeCount = 0;
+
+ unsigned StopAfterInstanceNum = 0;
+ unsigned StopAfterCount = 0;
+
bool Started = true;
bool Stopped = false;
bool AddingMachinePasses = false;
@@ -145,13 +158,13 @@ public:
CodeGenOpt::Level getOptLevel() const;
- /// Describe the status of the codegen
- /// pipeline set by this target pass config.
- /// Having a limited codegen pipeline means that options
- /// have been used to restrict what codegen is doing.
- /// In particular, that means that codegen won't emit
- /// assembly code.
- bool hasLimitedCodeGenPipeline() const;
+ /// Returns true if one of the `-start-after`, `-start-before`, `-stop-after`
+ /// or `-stop-before` options is set.
+ static bool hasLimitedCodeGenPipeline();
+
+ /// Returns true if none of the `-stop-before` and `-stop-after` options is
+ /// set.
+ static bool willCompleteCodeGenPipeline();
/// If hasLimitedCodeGenPipeline is true, this method
/// returns a string with the name of the options, separated
@@ -159,13 +172,6 @@ public:
std::string
getLimitedCodeGenPipelineReason(const char *Separator = "/") const;
- /// Check if the codegen pipeline is limited in such a way that it
- /// won't be complete. When the codegen pipeline is not complete,
- /// this means it may not be possible to generate assembly from it.
- bool willCompleteCodeGenPipeline() const {
- return !hasLimitedCodeGenPipeline() || (!StopAfter && !StopBefore);
- }
-
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
bool getEnableTailMerge() const { return EnableTailMerge; }
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 55a8ba630a59..0fbff3137653 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -510,6 +510,13 @@ public:
/// markSuperRegs() and checkAllSuperRegsMarked() in this case.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+ /// Returns false if we can't guarantee that Physreg, specified as an IR asm
+ /// clobber constraint, will be preserved across the statement.
+ virtual bool isAsmClobberable(const MachineFunction &MF,
+ unsigned PhysReg) const {
+ return true;
+ }
+
/// Returns true if PhysReg is unallocatable and constant throughout the
/// function. Used by MachineRegisterInfo::isConstantPhysReg().
virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
@@ -817,13 +824,6 @@ public:
// Do nothing.
}
- /// The creation of multiple copy hints have been implemented in
- /// weightCalcHelper(), but since this affects so many tests for many
- /// targets, this is temporarily disabled per default. THIS SHOULD BE
- /// "GENERAL GOODNESS" and hopefully all targets will update their tests
- /// and enable this soon. This hook should then be removed.
- virtual bool enableMultipleCopyHints() const { return false; }
-
/// Allow the target to reverse allocation order of local live ranges. This
/// will generally allocate shorter local live ranges first. For targets with
/// many registers, this could reduce regalloc compile time by a large
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/contrib/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index 227e591f5a7d..968e4c4b8102 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CODEGEN_TARGETSUBTARGETINFO_H
#define LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -144,6 +145,43 @@ public:
return 0;
}
+ /// Returns true if MI is a dependency breaking zero-idiom instruction for the
+ /// subtarget.
+ ///
+ /// This function also sets bits in Mask related to input operands that
+ /// are not in a data dependency relationship. There is one bit for each
+ /// machine operand; implicit operands follow explicit operands in the bit
+ /// representation used for Mask. An empty (i.e. a mask with all bits
+ /// cleared) means: data dependencies are "broken" for all the explicit input
+ /// machine operands of MI.
+ virtual bool isZeroIdiom(const MachineInstr *MI, APInt &Mask) const {
+ return false;
+ }
+
+ /// Returns true if MI is a dependency breaking instruction for the subtarget.
+ ///
+ /// Similar in behavior to `isZeroIdiom`. However, it knows how to identify
+ /// all dependency breaking instructions (i.e. not just zero-idioms).
+ ///
+ /// As for `isZeroIdiom`, this method returns a mask of "broken" dependencies.
+ /// (See method `isZeroIdiom` for a detailed description of Mask).
+ virtual bool isDependencyBreaking(const MachineInstr *MI, APInt &Mask) const {
+ return isZeroIdiom(MI, Mask);
+ }
+
+ /// Returns true if MI is a candidate for move elimination.
+ ///
+ /// A candidate for move elimination may be optimized out at register renaming
+ /// stage. Subtargets can specify the set of optimizable moves by
+ /// instantiating tablegen class `IsOptimizableRegisterMove` (see
+ /// llvm/Target/TargetInstrPredicate.td).
+ ///
+ /// SubtargetEmitter is responsible for processing all the definitions of class
+ /// IsOptimizableRegisterMove, and auto-generate an override for this method.
+ virtual bool isOptimizableRegisterMove(const MachineInstr *MI) const {
+ return false;
+ }
+
/// True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
diff --git a/contrib/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h b/contrib/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h
index 3ad6760d8813..219fff988f6e 100644
--- a/contrib/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/WasmEHFuncInfo.h
@@ -14,13 +14,15 @@
#ifndef LLVM_CODEGEN_WASMEHFUNCINFO_H
#define LLVM_CODEGEN_WASMEHFUNCINFO_H
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/BasicBlock.h"
namespace llvm {
+enum EventTag { CPP_EXCEPTION = 0, C_LONGJMP = 1 };
+
using BBOrMBB = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
struct WasmEHFuncInfo {
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CVRecord.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CVRecord.h
index 9dbeb438f4ae..11ca9ff108de 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/CVRecord.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CVRecord.h
@@ -45,13 +45,8 @@ public:
return RecordData.drop_front(sizeof(RecordPrefix));
}
- Optional<uint32_t> hash() const { return Hash; }
-
- void setHash(uint32_t Value) { Hash = Value; }
-
Kind Type;
ArrayRef<uint8_t> RecordData;
- Optional<uint32_t> Hash;
};
template <typename Kind> struct RemappedRecord {
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
index 4ce9f68cffd9..8e0d9f608e93 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -231,6 +231,8 @@ enum class FrameProcedureOptions : uint32_t {
Inlined = 0x00000800,
StrictSecurityChecks = 0x00001000,
SafeBuffers = 0x00002000,
+ EncodedLocalBasePointerMask = 0x0000C000,
+ EncodedParamBasePointerMask = 0x00030000,
ProfileGuidedOptimization = 0x00040000,
ValidProfileCounts = 0x00080000,
OptimizedForSpeed = 0x00100000,
@@ -356,7 +358,9 @@ enum class PointerOptions : uint32_t {
Const = 0x00000400,
Unaligned = 0x00000800,
Restrict = 0x00001000,
- WinRTSmartPointer = 0x00080000
+ WinRTSmartPointer = 0x00080000,
+ LValueRefThisPointer = 0x00100000,
+ RValueRefThisPointer = 0x00200000
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PointerOptions)
@@ -510,6 +514,19 @@ enum class RegisterId : uint16_t {
#undef CV_REGISTER
};
+/// Two-bit value indicating which register is the designated frame pointer
+/// register. Appears in the S_FRAMEPROC record flags.
+enum class EncodedFramePtrReg : uint8_t {
+ None = 0,
+ StackPtr = 1,
+ FramePtr = 2,
+ BasePtr = 3,
+};
+
+RegisterId decodeFramePtrReg(EncodedFramePtrReg EncodedReg, CPUType CPU);
+
+EncodedFramePtrReg encodeFramePtrReg(RegisterId Reg, CPUType CPU);
+
/// These values correspond to the THUNK_ORDINAL enumeration.
enum class ThunkOrdinal : uint8_t {
Standard,
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewError.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewError.h
index 586a720ce6e4..d4615d02220d 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewError.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewError.h
@@ -24,23 +24,32 @@ enum class cv_error_code {
no_records,
unknown_member_record,
};
+} // namespace codeview
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::codeview::cv_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace codeview {
+const std::error_category &CVErrorCategory();
+
+inline std::error_code make_error_code(cv_error_code E) {
+ return std::error_code(static_cast<int>(E), CVErrorCategory());
+}
/// Base class for errors originating when parsing raw PDB files
-class CodeViewError : public ErrorInfo<CodeViewError> {
+class CodeViewError : public ErrorInfo<CodeViewError, StringError> {
public:
+ using ErrorInfo<CodeViewError,
+ StringError>::ErrorInfo; // inherit constructors
+ CodeViewError(const Twine &S) : ErrorInfo(S, cv_error_code::unspecified) {}
static char ID;
- CodeViewError(cv_error_code C);
- CodeViewError(const std::string &Context);
- CodeViewError(cv_error_code C, const std::string &Context);
+};
- void log(raw_ostream &OS) const override;
- const std::string &getErrorMessage() const;
- std::error_code convertToErrorCode() const override;
+} // namespace codeview
+} // namespace llvm
-private:
- std::string ErrMsg;
- cv_error_code Code;
-};
-}
-}
#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def
index 6da8893bd61a..fdfcf4d53a23 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewRegisters.def
@@ -18,251 +18,342 @@
// This currently only contains the "register subset shared by all processor
// types" (ERR etc.) and the x86 registers.
-CV_REGISTER(CVRegERR, 30000)
-CV_REGISTER(CVRegTEB, 30001)
-CV_REGISTER(CVRegTIMER, 30002)
-CV_REGISTER(CVRegEFAD1, 30003)
-CV_REGISTER(CVRegEFAD2, 30004)
-CV_REGISTER(CVRegEFAD3, 30005)
-CV_REGISTER(CVRegVFRAME, 30006)
-CV_REGISTER(CVRegHANDLE, 30007)
-CV_REGISTER(CVRegPARAMS, 30008)
-CV_REGISTER(CVRegLOCALS, 30009)
-CV_REGISTER(CVRegTID, 30010)
-CV_REGISTER(CVRegENV, 30011)
-CV_REGISTER(CVRegCMDLN, 30012)
-
-CV_REGISTER(CVRegNONE, 0)
-CV_REGISTER(CVRegAL, 1)
-CV_REGISTER(CVRegCL, 2)
-CV_REGISTER(CVRegDL, 3)
-CV_REGISTER(CVRegBL, 4)
-CV_REGISTER(CVRegAH, 5)
-CV_REGISTER(CVRegCH, 6)
-CV_REGISTER(CVRegDH, 7)
-CV_REGISTER(CVRegBH, 8)
-CV_REGISTER(CVRegAX, 9)
-CV_REGISTER(CVRegCX, 10)
-CV_REGISTER(CVRegDX, 11)
-CV_REGISTER(CVRegBX, 12)
-CV_REGISTER(CVRegSP, 13)
-CV_REGISTER(CVRegBP, 14)
-CV_REGISTER(CVRegSI, 15)
-CV_REGISTER(CVRegDI, 16)
-CV_REGISTER(CVRegEAX, 17)
-CV_REGISTER(CVRegECX, 18)
-CV_REGISTER(CVRegEDX, 19)
-CV_REGISTER(CVRegEBX, 20)
-CV_REGISTER(CVRegESP, 21)
-CV_REGISTER(CVRegEBP, 22)
-CV_REGISTER(CVRegESI, 23)
-CV_REGISTER(CVRegEDI, 24)
-CV_REGISTER(CVRegES, 25)
-CV_REGISTER(CVRegCS, 26)
-CV_REGISTER(CVRegSS, 27)
-CV_REGISTER(CVRegDS, 28)
-CV_REGISTER(CVRegFS, 29)
-CV_REGISTER(CVRegGS, 30)
-CV_REGISTER(CVRegIP, 31)
-CV_REGISTER(CVRegFLAGS, 32)
-CV_REGISTER(CVRegEIP, 33)
-CV_REGISTER(CVRegEFLAGS, 34)
-CV_REGISTER(CVRegTEMP, 40)
-CV_REGISTER(CVRegTEMPH, 41)
-CV_REGISTER(CVRegQUOTE, 42)
-CV_REGISTER(CVRegPCDR3, 43)
-CV_REGISTER(CVRegPCDR4, 44)
-CV_REGISTER(CVRegPCDR5, 45)
-CV_REGISTER(CVRegPCDR6, 46)
-CV_REGISTER(CVRegPCDR7, 47)
-CV_REGISTER(CVRegCR0, 80)
-CV_REGISTER(CVRegCR1, 81)
-CV_REGISTER(CVRegCR2, 82)
-CV_REGISTER(CVRegCR3, 83)
-CV_REGISTER(CVRegCR4, 84)
-CV_REGISTER(CVRegDR0, 90)
-CV_REGISTER(CVRegDR1, 91)
-CV_REGISTER(CVRegDR2, 92)
-CV_REGISTER(CVRegDR3, 93)
-CV_REGISTER(CVRegDR4, 94)
-CV_REGISTER(CVRegDR5, 95)
-CV_REGISTER(CVRegDR6, 96)
-CV_REGISTER(CVRegDR7, 97)
-CV_REGISTER(CVRegGDTR, 110)
-CV_REGISTER(CVRegGDTL, 111)
-CV_REGISTER(CVRegIDTR, 112)
-CV_REGISTER(CVRegIDTL, 113)
-CV_REGISTER(CVRegLDTR, 114)
-CV_REGISTER(CVRegTR, 115)
-
-CV_REGISTER(CVRegPSEUDO1, 116)
-CV_REGISTER(CVRegPSEUDO2, 117)
-CV_REGISTER(CVRegPSEUDO3, 118)
-CV_REGISTER(CVRegPSEUDO4, 119)
-CV_REGISTER(CVRegPSEUDO5, 120)
-CV_REGISTER(CVRegPSEUDO6, 121)
-CV_REGISTER(CVRegPSEUDO7, 122)
-CV_REGISTER(CVRegPSEUDO8, 123)
-CV_REGISTER(CVRegPSEUDO9, 124)
-
-CV_REGISTER(CVRegST0, 128)
-CV_REGISTER(CVRegST1, 129)
-CV_REGISTER(CVRegST2, 130)
-CV_REGISTER(CVRegST3, 131)
-CV_REGISTER(CVRegST4, 132)
-CV_REGISTER(CVRegST5, 133)
-CV_REGISTER(CVRegST6, 134)
-CV_REGISTER(CVRegST7, 135)
-CV_REGISTER(CVRegCTRL, 136)
-CV_REGISTER(CVRegSTAT, 137)
-CV_REGISTER(CVRegTAG, 138)
-CV_REGISTER(CVRegFPIP, 139)
-CV_REGISTER(CVRegFPCS, 140)
-CV_REGISTER(CVRegFPDO, 141)
-CV_REGISTER(CVRegFPDS, 142)
-CV_REGISTER(CVRegISEM, 143)
-CV_REGISTER(CVRegFPEIP, 144)
-CV_REGISTER(CVRegFPEDO, 145)
-
-CV_REGISTER(CVRegMM0, 146)
-CV_REGISTER(CVRegMM1, 147)
-CV_REGISTER(CVRegMM2, 148)
-CV_REGISTER(CVRegMM3, 149)
-CV_REGISTER(CVRegMM4, 150)
-CV_REGISTER(CVRegMM5, 151)
-CV_REGISTER(CVRegMM6, 152)
-CV_REGISTER(CVRegMM7, 153)
-
-CV_REGISTER(CVRegXMM0, 154)
-CV_REGISTER(CVRegXMM1, 155)
-CV_REGISTER(CVRegXMM2, 156)
-CV_REGISTER(CVRegXMM3, 157)
-CV_REGISTER(CVRegXMM4, 158)
-CV_REGISTER(CVRegXMM5, 159)
-CV_REGISTER(CVRegXMM6, 160)
-CV_REGISTER(CVRegXMM7, 161)
-
-CV_REGISTER(CVRegMXCSR, 211)
-
-CV_REGISTER(CVRegEDXEAX, 212)
-
-CV_REGISTER(CVRegEMM0L, 220)
-CV_REGISTER(CVRegEMM1L, 221)
-CV_REGISTER(CVRegEMM2L, 222)
-CV_REGISTER(CVRegEMM3L, 223)
-CV_REGISTER(CVRegEMM4L, 224)
-CV_REGISTER(CVRegEMM5L, 225)
-CV_REGISTER(CVRegEMM6L, 226)
-CV_REGISTER(CVRegEMM7L, 227)
-
-CV_REGISTER(CVRegEMM0H, 228)
-CV_REGISTER(CVRegEMM1H, 229)
-CV_REGISTER(CVRegEMM2H, 230)
-CV_REGISTER(CVRegEMM3H, 231)
-CV_REGISTER(CVRegEMM4H, 232)
-CV_REGISTER(CVRegEMM5H, 233)
-CV_REGISTER(CVRegEMM6H, 234)
-CV_REGISTER(CVRegEMM7H, 235)
-
-CV_REGISTER(CVRegMM00, 236)
-CV_REGISTER(CVRegMM01, 237)
-CV_REGISTER(CVRegMM10, 238)
-CV_REGISTER(CVRegMM11, 239)
-CV_REGISTER(CVRegMM20, 240)
-CV_REGISTER(CVRegMM21, 241)
-CV_REGISTER(CVRegMM30, 242)
-CV_REGISTER(CVRegMM31, 243)
-CV_REGISTER(CVRegMM40, 244)
-CV_REGISTER(CVRegMM41, 245)
-CV_REGISTER(CVRegMM50, 246)
-CV_REGISTER(CVRegMM51, 247)
-CV_REGISTER(CVRegMM60, 248)
-CV_REGISTER(CVRegMM61, 249)
-CV_REGISTER(CVRegMM70, 250)
-CV_REGISTER(CVRegMM71, 251)
-
-CV_REGISTER(CVRegBND0, 396)
-CV_REGISTER(CVRegBND1, 397)
-CV_REGISTER(CVRegBND2, 398)
-
-
-CV_REGISTER(CVRegXMM8, 252)
-CV_REGISTER(CVRegXMM9, 253)
-CV_REGISTER(CVRegXMM10, 254)
-CV_REGISTER(CVRegXMM11, 255)
-CV_REGISTER(CVRegXMM12, 256)
-CV_REGISTER(CVRegXMM13, 257)
-CV_REGISTER(CVRegXMM14, 258)
-CV_REGISTER(CVRegXMM15, 259)
-
-
-CV_REGISTER(CVRegSIL, 324)
-CV_REGISTER(CVRegDIL, 325)
-CV_REGISTER(CVRegBPL, 326)
-CV_REGISTER(CVRegSPL, 327)
-
-CV_REGISTER(CVRegRAX, 328)
-CV_REGISTER(CVRegRBX, 329)
-CV_REGISTER(CVRegRCX, 330)
-CV_REGISTER(CVRegRDX, 331)
-CV_REGISTER(CVRegRSI, 332)
-CV_REGISTER(CVRegRDI, 333)
-CV_REGISTER(CVRegRBP, 334)
-CV_REGISTER(CVRegRSP, 335)
-
-CV_REGISTER(CVRegR8, 336)
-CV_REGISTER(CVRegR9, 337)
-CV_REGISTER(CVRegR10, 338)
-CV_REGISTER(CVRegR11, 339)
-CV_REGISTER(CVRegR12, 340)
-CV_REGISTER(CVRegR13, 341)
-CV_REGISTER(CVRegR14, 342)
-CV_REGISTER(CVRegR15, 343)
-
-CV_REGISTER(CVRegR8B, 344)
-CV_REGISTER(CVRegR9B, 345)
-CV_REGISTER(CVRegR10B, 346)
-CV_REGISTER(CVRegR11B, 347)
-CV_REGISTER(CVRegR12B, 348)
-CV_REGISTER(CVRegR13B, 349)
-CV_REGISTER(CVRegR14B, 350)
-CV_REGISTER(CVRegR15B, 351)
-
-CV_REGISTER(CVRegR8W, 352)
-CV_REGISTER(CVRegR9W, 353)
-CV_REGISTER(CVRegR10W, 354)
-CV_REGISTER(CVRegR11W, 355)
-CV_REGISTER(CVRegR12W, 356)
-CV_REGISTER(CVRegR13W, 357)
-CV_REGISTER(CVRegR14W, 358)
-CV_REGISTER(CVRegR15W, 359)
-
-CV_REGISTER(CVRegR8D, 360)
-CV_REGISTER(CVRegR9D, 361)
-CV_REGISTER(CVRegR10D, 362)
-CV_REGISTER(CVRegR11D, 363)
-CV_REGISTER(CVRegR12D, 364)
-CV_REGISTER(CVRegR13D, 365)
-CV_REGISTER(CVRegR14D, 366)
-CV_REGISTER(CVRegR15D, 367)
+// Some system headers define macros that conflict with our enums. Every
+// compiler supported by LLVM has the push_macro and pop_macro pragmas, so use
+// them to avoid the conflict.
+#pragma push_macro("CR0")
+#pragma push_macro("CR1")
+#pragma push_macro("CR2")
+#pragma push_macro("CR3")
+#pragma push_macro("CR4")
+
+CV_REGISTER(ERR, 30000)
+CV_REGISTER(TEB, 30001)
+CV_REGISTER(TIMER, 30002)
+CV_REGISTER(EFAD1, 30003)
+CV_REGISTER(EFAD2, 30004)
+CV_REGISTER(EFAD3, 30005)
+CV_REGISTER(VFRAME, 30006)
+CV_REGISTER(HANDLE, 30007)
+CV_REGISTER(PARAMS, 30008)
+CV_REGISTER(LOCALS, 30009)
+CV_REGISTER(TID, 30010)
+CV_REGISTER(ENV, 30011)
+CV_REGISTER(CMDLN, 30012)
+
+CV_REGISTER(NONE, 0)
+CV_REGISTER(AL, 1)
+CV_REGISTER(CL, 2)
+CV_REGISTER(DL, 3)
+CV_REGISTER(BL, 4)
+CV_REGISTER(AH, 5)
+CV_REGISTER(CH, 6)
+CV_REGISTER(DH, 7)
+CV_REGISTER(BH, 8)
+CV_REGISTER(AX, 9)
+CV_REGISTER(CX, 10)
+CV_REGISTER(DX, 11)
+CV_REGISTER(BX, 12)
+CV_REGISTER(SP, 13)
+CV_REGISTER(BP, 14)
+CV_REGISTER(SI, 15)
+CV_REGISTER(DI, 16)
+CV_REGISTER(EAX, 17)
+CV_REGISTER(ECX, 18)
+CV_REGISTER(EDX, 19)
+CV_REGISTER(EBX, 20)
+CV_REGISTER(ESP, 21)
+CV_REGISTER(EBP, 22)
+CV_REGISTER(ESI, 23)
+CV_REGISTER(EDI, 24)
+CV_REGISTER(ES, 25)
+CV_REGISTER(CS, 26)
+CV_REGISTER(SS, 27)
+CV_REGISTER(DS, 28)
+CV_REGISTER(FS, 29)
+CV_REGISTER(GS, 30)
+CV_REGISTER(IP, 31)
+CV_REGISTER(FLAGS, 32)
+CV_REGISTER(EIP, 33)
+CV_REGISTER(EFLAGS, 34)
+CV_REGISTER(TEMP, 40)
+CV_REGISTER(TEMPH, 41)
+CV_REGISTER(QUOTE, 42)
+CV_REGISTER(PCDR3, 43)
+CV_REGISTER(PCDR4, 44)
+CV_REGISTER(PCDR5, 45)
+CV_REGISTER(PCDR6, 46)
+CV_REGISTER(PCDR7, 47)
+CV_REGISTER(CR0, 80)
+CV_REGISTER(CR1, 81)
+CV_REGISTER(CR2, 82)
+CV_REGISTER(CR3, 83)
+CV_REGISTER(CR4, 84)
+CV_REGISTER(DR0, 90)
+CV_REGISTER(DR1, 91)
+CV_REGISTER(DR2, 92)
+CV_REGISTER(DR3, 93)
+CV_REGISTER(DR4, 94)
+CV_REGISTER(DR5, 95)
+CV_REGISTER(DR6, 96)
+CV_REGISTER(DR7, 97)
+CV_REGISTER(GDTR, 110)
+CV_REGISTER(GDTL, 111)
+CV_REGISTER(IDTR, 112)
+CV_REGISTER(IDTL, 113)
+CV_REGISTER(LDTR, 114)
+CV_REGISTER(TR, 115)
+
+CV_REGISTER(PSEUDO1, 116)
+CV_REGISTER(PSEUDO2, 117)
+CV_REGISTER(PSEUDO3, 118)
+CV_REGISTER(PSEUDO4, 119)
+CV_REGISTER(PSEUDO5, 120)
+CV_REGISTER(PSEUDO6, 121)
+CV_REGISTER(PSEUDO7, 122)
+CV_REGISTER(PSEUDO8, 123)
+CV_REGISTER(PSEUDO9, 124)
+
+CV_REGISTER(ST0, 128)
+CV_REGISTER(ST1, 129)
+CV_REGISTER(ST2, 130)
+CV_REGISTER(ST3, 131)
+CV_REGISTER(ST4, 132)
+CV_REGISTER(ST5, 133)
+CV_REGISTER(ST6, 134)
+CV_REGISTER(ST7, 135)
+CV_REGISTER(CTRL, 136)
+CV_REGISTER(STAT, 137)
+CV_REGISTER(TAG, 138)
+CV_REGISTER(FPIP, 139)
+CV_REGISTER(FPCS, 140)
+CV_REGISTER(FPDO, 141)
+CV_REGISTER(FPDS, 142)
+CV_REGISTER(ISEM, 143)
+CV_REGISTER(FPEIP, 144)
+CV_REGISTER(FPEDO, 145)
+
+CV_REGISTER(MM0, 146)
+CV_REGISTER(MM1, 147)
+CV_REGISTER(MM2, 148)
+CV_REGISTER(MM3, 149)
+CV_REGISTER(MM4, 150)
+CV_REGISTER(MM5, 151)
+CV_REGISTER(MM6, 152)
+CV_REGISTER(MM7, 153)
+
+CV_REGISTER(XMM0, 154)
+CV_REGISTER(XMM1, 155)
+CV_REGISTER(XMM2, 156)
+CV_REGISTER(XMM3, 157)
+CV_REGISTER(XMM4, 158)
+CV_REGISTER(XMM5, 159)
+CV_REGISTER(XMM6, 160)
+CV_REGISTER(XMM7, 161)
+
+CV_REGISTER(MXCSR, 211)
+
+CV_REGISTER(EDXEAX, 212)
+
+CV_REGISTER(EMM0L, 220)
+CV_REGISTER(EMM1L, 221)
+CV_REGISTER(EMM2L, 222)
+CV_REGISTER(EMM3L, 223)
+CV_REGISTER(EMM4L, 224)
+CV_REGISTER(EMM5L, 225)
+CV_REGISTER(EMM6L, 226)
+CV_REGISTER(EMM7L, 227)
+
+CV_REGISTER(EMM0H, 228)
+CV_REGISTER(EMM1H, 229)
+CV_REGISTER(EMM2H, 230)
+CV_REGISTER(EMM3H, 231)
+CV_REGISTER(EMM4H, 232)
+CV_REGISTER(EMM5H, 233)
+CV_REGISTER(EMM6H, 234)
+CV_REGISTER(EMM7H, 235)
+
+CV_REGISTER(MM00, 236)
+CV_REGISTER(MM01, 237)
+CV_REGISTER(MM10, 238)
+CV_REGISTER(MM11, 239)
+CV_REGISTER(MM20, 240)
+CV_REGISTER(MM21, 241)
+CV_REGISTER(MM30, 242)
+CV_REGISTER(MM31, 243)
+CV_REGISTER(MM40, 244)
+CV_REGISTER(MM41, 245)
+CV_REGISTER(MM50, 246)
+CV_REGISTER(MM51, 247)
+CV_REGISTER(MM60, 248)
+CV_REGISTER(MM61, 249)
+CV_REGISTER(MM70, 250)
+CV_REGISTER(MM71, 251)
+
+CV_REGISTER(BND0, 396)
+CV_REGISTER(BND1, 397)
+CV_REGISTER(BND2, 398)
+
+
+CV_REGISTER(XMM8, 252)
+CV_REGISTER(XMM9, 253)
+CV_REGISTER(XMM10, 254)
+CV_REGISTER(XMM11, 255)
+CV_REGISTER(XMM12, 256)
+CV_REGISTER(XMM13, 257)
+CV_REGISTER(XMM14, 258)
+CV_REGISTER(XMM15, 259)
+
+
+CV_REGISTER(SIL, 324)
+CV_REGISTER(DIL, 325)
+CV_REGISTER(BPL, 326)
+CV_REGISTER(SPL, 327)
+
+CV_REGISTER(RAX, 328)
+CV_REGISTER(RBX, 329)
+CV_REGISTER(RCX, 330)
+CV_REGISTER(RDX, 331)
+CV_REGISTER(RSI, 332)
+CV_REGISTER(RDI, 333)
+CV_REGISTER(RBP, 334)
+CV_REGISTER(RSP, 335)
+
+CV_REGISTER(R8, 336)
+CV_REGISTER(R9, 337)
+CV_REGISTER(R10, 338)
+CV_REGISTER(R11, 339)
+CV_REGISTER(R12, 340)
+CV_REGISTER(R13, 341)
+CV_REGISTER(R14, 342)
+CV_REGISTER(R15, 343)
+
+CV_REGISTER(R8B, 344)
+CV_REGISTER(R9B, 345)
+CV_REGISTER(R10B, 346)
+CV_REGISTER(R11B, 347)
+CV_REGISTER(R12B, 348)
+CV_REGISTER(R13B, 349)
+CV_REGISTER(R14B, 350)
+CV_REGISTER(R15B, 351)
+
+CV_REGISTER(R8W, 352)
+CV_REGISTER(R9W, 353)
+CV_REGISTER(R10W, 354)
+CV_REGISTER(R11W, 355)
+CV_REGISTER(R12W, 356)
+CV_REGISTER(R13W, 357)
+CV_REGISTER(R14W, 358)
+CV_REGISTER(R15W, 359)
+
+CV_REGISTER(R8D, 360)
+CV_REGISTER(R9D, 361)
+CV_REGISTER(R10D, 362)
+CV_REGISTER(R11D, 363)
+CV_REGISTER(R12D, 364)
+CV_REGISTER(R13D, 365)
+CV_REGISTER(R14D, 366)
+CV_REGISTER(R15D, 367)
// cvconst.h defines both CV_REG_YMM0 (252) and CV_AMD64_YMM0 (368). Keep the
// original prefix to distinguish them.
-CV_REGISTER(CVRegAMD64_YMM0, 368)
-CV_REGISTER(CVRegAMD64_YMM1, 369)
-CV_REGISTER(CVRegAMD64_YMM2, 370)
-CV_REGISTER(CVRegAMD64_YMM3, 371)
-CV_REGISTER(CVRegAMD64_YMM4, 372)
-CV_REGISTER(CVRegAMD64_YMM5, 373)
-CV_REGISTER(CVRegAMD64_YMM6, 374)
-CV_REGISTER(CVRegAMD64_YMM7, 375)
-CV_REGISTER(CVRegAMD64_YMM8, 376)
-CV_REGISTER(CVRegAMD64_YMM9, 377)
-CV_REGISTER(CVRegAMD64_YMM10, 378)
-CV_REGISTER(CVRegAMD64_YMM11, 379)
-CV_REGISTER(CVRegAMD64_YMM12, 380)
-CV_REGISTER(CVRegAMD64_YMM13, 381)
-CV_REGISTER(CVRegAMD64_YMM14, 382)
-CV_REGISTER(CVRegAMD64_YMM15, 383)
+CV_REGISTER(AMD64_YMM0, 368)
+CV_REGISTER(AMD64_YMM1, 369)
+CV_REGISTER(AMD64_YMM2, 370)
+CV_REGISTER(AMD64_YMM3, 371)
+CV_REGISTER(AMD64_YMM4, 372)
+CV_REGISTER(AMD64_YMM5, 373)
+CV_REGISTER(AMD64_YMM6, 374)
+CV_REGISTER(AMD64_YMM7, 375)
+CV_REGISTER(AMD64_YMM8, 376)
+CV_REGISTER(AMD64_YMM9, 377)
+CV_REGISTER(AMD64_YMM10, 378)
+CV_REGISTER(AMD64_YMM11, 379)
+CV_REGISTER(AMD64_YMM12, 380)
+CV_REGISTER(AMD64_YMM13, 381)
+CV_REGISTER(AMD64_YMM14, 382)
+CV_REGISTER(AMD64_YMM15, 383)
+
+CV_REGISTER(AMD64_XMM16, 694)
+CV_REGISTER(AMD64_XMM17, 695)
+CV_REGISTER(AMD64_XMM18, 696)
+CV_REGISTER(AMD64_XMM19, 697)
+CV_REGISTER(AMD64_XMM20, 698)
+CV_REGISTER(AMD64_XMM21, 699)
+CV_REGISTER(AMD64_XMM22, 700)
+CV_REGISTER(AMD64_XMM23, 701)
+CV_REGISTER(AMD64_XMM24, 702)
+CV_REGISTER(AMD64_XMM25, 703)
+CV_REGISTER(AMD64_XMM26, 704)
+CV_REGISTER(AMD64_XMM27, 705)
+CV_REGISTER(AMD64_XMM28, 706)
+CV_REGISTER(AMD64_XMM29, 707)
+CV_REGISTER(AMD64_XMM30, 708)
+CV_REGISTER(AMD64_XMM31, 709)
+
+CV_REGISTER(AMD64_YMM16, 710)
+CV_REGISTER(AMD64_YMM17, 711)
+CV_REGISTER(AMD64_YMM18, 712)
+CV_REGISTER(AMD64_YMM19, 713)
+CV_REGISTER(AMD64_YMM20, 714)
+CV_REGISTER(AMD64_YMM21, 715)
+CV_REGISTER(AMD64_YMM22, 716)
+CV_REGISTER(AMD64_YMM23, 717)
+CV_REGISTER(AMD64_YMM24, 718)
+CV_REGISTER(AMD64_YMM25, 719)
+CV_REGISTER(AMD64_YMM26, 720)
+CV_REGISTER(AMD64_YMM27, 721)
+CV_REGISTER(AMD64_YMM28, 722)
+CV_REGISTER(AMD64_YMM29, 723)
+CV_REGISTER(AMD64_YMM30, 724)
+CV_REGISTER(AMD64_YMM31, 725)
+
+CV_REGISTER(AMD64_ZMM0, 726)
+CV_REGISTER(AMD64_ZMM1, 727)
+CV_REGISTER(AMD64_ZMM2, 728)
+CV_REGISTER(AMD64_ZMM3, 729)
+CV_REGISTER(AMD64_ZMM4, 730)
+CV_REGISTER(AMD64_ZMM5, 731)
+CV_REGISTER(AMD64_ZMM6, 732)
+CV_REGISTER(AMD64_ZMM7, 733)
+CV_REGISTER(AMD64_ZMM8, 734)
+CV_REGISTER(AMD64_ZMM9, 735)
+CV_REGISTER(AMD64_ZMM10, 736)
+CV_REGISTER(AMD64_ZMM11, 737)
+CV_REGISTER(AMD64_ZMM12, 738)
+CV_REGISTER(AMD64_ZMM13, 739)
+CV_REGISTER(AMD64_ZMM14, 740)
+CV_REGISTER(AMD64_ZMM15, 741)
+CV_REGISTER(AMD64_ZMM16, 742)
+CV_REGISTER(AMD64_ZMM17, 743)
+CV_REGISTER(AMD64_ZMM18, 744)
+CV_REGISTER(AMD64_ZMM19, 745)
+CV_REGISTER(AMD64_ZMM20, 746)
+CV_REGISTER(AMD64_ZMM21, 747)
+CV_REGISTER(AMD64_ZMM22, 748)
+CV_REGISTER(AMD64_ZMM23, 749)
+CV_REGISTER(AMD64_ZMM24, 750)
+CV_REGISTER(AMD64_ZMM25, 751)
+CV_REGISTER(AMD64_ZMM26, 752)
+CV_REGISTER(AMD64_ZMM27, 753)
+CV_REGISTER(AMD64_ZMM28, 754)
+CV_REGISTER(AMD64_ZMM29, 755)
+CV_REGISTER(AMD64_ZMM30, 756)
+CV_REGISTER(AMD64_ZMM31, 757)
+
+CV_REGISTER(AMD64_K0, 758)
+CV_REGISTER(AMD64_K1, 759)
+CV_REGISTER(AMD64_K2, 760)
+CV_REGISTER(AMD64_K3, 761)
+CV_REGISTER(AMD64_K4, 762)
+CV_REGISTER(AMD64_K5, 763)
+CV_REGISTER(AMD64_K6, 764)
+CV_REGISTER(AMD64_K7, 765)
+
+#pragma pop_macro("CR0")
+#pragma pop_macro("CR1")
+#pragma pop_macro("CR2")
+#pragma pop_macro("CR3")
+#pragma pop_macro("CR4")
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
index 1e329c7c3f14..847d93f0e985 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
@@ -13,6 +13,7 @@
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -26,21 +27,23 @@ public:
}
Error initialize(BinaryStreamReader Reader);
+ Error initialize(BinaryStreamRef Stream);
FixedStreamArray<FrameData>::Iterator begin() const { return Frames.begin(); }
FixedStreamArray<FrameData>::Iterator end() const { return Frames.end(); }
- const void *getRelocPtr() const { return RelocPtr; }
+ const support::ulittle32_t *getRelocPtr() const { return RelocPtr; }
private:
- const uint32_t *RelocPtr = nullptr;
+ const support::ulittle32_t *RelocPtr = nullptr;
FixedStreamArray<FrameData> Frames;
};
class DebugFrameDataSubsection final : public DebugSubsection {
public:
- DebugFrameDataSubsection()
- : DebugSubsection(DebugSubsectionKind::FrameData) {}
+ DebugFrameDataSubsection(bool IncludeRelocPtr)
+ : DebugSubsection(DebugSubsectionKind::FrameData),
+ IncludeRelocPtr(IncludeRelocPtr) {}
static bool classof(const DebugSubsection *S) {
return S->kind() == DebugSubsectionKind::FrameData;
}
@@ -52,6 +55,7 @@ public:
void setFrames(ArrayRef<FrameData> Frames);
private:
+ bool IncludeRelocPtr = false;
std::vector<FrameData> Frames;
};
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/RecordSerialization.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/RecordSerialization.h
index 58449c2c7565..36237e1a4d9e 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/RecordSerialization.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/RecordSerialization.h
@@ -180,26 +180,6 @@ template <typename T> serialize_numeric_impl<T> serialize_numeric(T &Item) {
return serialize_numeric_impl<T>(Item);
}
-// This field is only present in the byte record if the condition is true. The
-// condition is evaluated lazily, so it can depend on items that were
-// deserialized
-// earlier.
-#define CV_CONDITIONAL_FIELD(I, C) \
- serialize_conditional(I, [&]() { return !!(C); })
-
-// This is an array of N items, where N is evaluated lazily, so it can refer
-// to a field deserialized earlier.
-#define CV_ARRAY_FIELD_N(I, N) serialize_array(I, [&]() { return N; })
-
-// This is an array that exhausts the remainder of the input buffer.
-#define CV_ARRAY_FIELD_TAIL(I) serialize_array_tail(I)
-
-// This is an array that consumes null terminated strings until a double null
-// is encountered.
-#define CV_STRING_ARRAY_NULL_TERM(I) serialize_null_term_string_array(I)
-
-#define CV_NUMERIC_FIELD(I) serialize_numeric(I)
-
template <typename T, typename U>
Error consume(BinaryStreamReader &Reader,
const serialize_conditional_impl<T, U> &Item) {
@@ -242,9 +222,6 @@ Error consume(BinaryStreamReader &Reader, T &&X, U &&Y, Args &&... Rest) {
return consume(Reader, Y, std::forward<Args>(Rest)...);
}
-#define CV_DESERIALIZE(...) \
- if (auto EC = consume(__VA_ARGS__)) \
- return std::move(EC);
}
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
index b5479db97a15..6b5dd2d20d17 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
@@ -47,7 +47,7 @@ public:
return Error::success();
}
template <typename T> static Expected<T> deserializeAs(CVSymbol Symbol) {
- T Record(Symbol.kind());
+ T Record(static_cast<SymbolRecordKind>(Symbol.kind()));
if (auto EC = deserializeAs<T>(Symbol, Record))
return std::move(EC);
return Record;
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDumper.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDumper.h
index 293daa851bdd..215da2e2b522 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDumper.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolDumper.h
@@ -27,10 +27,10 @@ class CVSymbolDumper {
public:
CVSymbolDumper(ScopedPrinter &W, TypeCollection &Types,
CodeViewContainer Container,
- std::unique_ptr<SymbolDumpDelegate> ObjDelegate,
+ std::unique_ptr<SymbolDumpDelegate> ObjDelegate, CPUType CPU,
bool PrintRecordBytes)
: W(W), Types(Types), Container(Container),
- ObjDelegate(std::move(ObjDelegate)),
+ ObjDelegate(std::move(ObjDelegate)), CompilationCPUType(CPU),
PrintRecordBytes(PrintRecordBytes) {}
/// Dumps one type record. Returns false if there was a type parsing error,
@@ -43,12 +43,14 @@ public:
/// parse error, and true otherwise.
Error dump(const CVSymbolArray &Symbols);
+ CPUType getCompilationCPUType() const { return CompilationCPUType; }
+
private:
ScopedPrinter &W;
TypeCollection &Types;
CodeViewContainer Container;
std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
-
+ CPUType CompilationCPUType;
bool PrintRecordBytes;
};
} // end namespace codeview
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecord.h
index 93306824012e..b58825c4a788 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecord.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -358,6 +358,7 @@ public:
// S_PUB32
class PublicSym32 : public SymbolRecord {
public:
+ PublicSym32() : SymbolRecord(SymbolRecordKind::PublicSym32) {}
explicit PublicSym32(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
explicit PublicSym32(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::PublicSym32),
@@ -399,6 +400,7 @@ public:
uint16_t Module;
StringRef Name;
+ uint16_t modi() const { return Module - 1; }
uint32_t RecordOffset;
};
@@ -636,6 +638,7 @@ public:
// S_OBJNAME
class ObjNameSym : public SymbolRecord {
public:
+ explicit ObjNameSym() : SymbolRecord(SymbolRecordKind::ObjNameSym) {}
explicit ObjNameSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
ObjNameSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::ObjNameSym), RecordOffset(RecordOffset) {
@@ -718,6 +721,7 @@ public:
// S_COMPILE3
class Compile3Sym : public SymbolRecord {
public:
+ Compile3Sym() : SymbolRecord(SymbolRecordKind::Compile3Sym) {}
explicit Compile3Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
Compile3Sym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::Compile3Sym),
@@ -739,8 +743,17 @@ public:
Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
}
- uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
- uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
+ SourceLanguage getLanguage() const {
+ return static_cast<SourceLanguage>(static_cast<uint32_t>(Flags) & 0xFF);
+ }
+ CompileSym3Flags getFlags() const {
+ return static_cast<CompileSym3Flags>(static_cast<uint32_t>(Flags) & ~0xFF);
+ }
+
+ bool hasOptimizations() const {
+ return CompileSym3Flags::None !=
+ (getFlags() & (CompileSym3Flags::PGO | CompileSym3Flags::LTCG));
+ }
uint32_t RecordOffset;
};
@@ -761,7 +774,21 @@ public:
uint16_t SectionIdOfExceptionHandler;
FrameProcedureOptions Flags;
+ /// Extract the register this frame uses to refer to local variables.
+ RegisterId getLocalFramePtrReg(CPUType CPU) const {
+ return decodeFramePtrReg(
+ EncodedFramePtrReg((uint32_t(Flags) >> 14U) & 0x3U), CPU);
+ }
+
+ /// Extract the register this frame uses to refer to parameters.
+ RegisterId getParamFramePtrReg(CPUType CPU) const {
+ return decodeFramePtrReg(
+ EncodedFramePtrReg((uint32_t(Flags) >> 16U) & 0x3U), CPU);
+ }
+
uint32_t RecordOffset;
+
+private:
};
// S_CALLSITEINFO
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h
new file mode 100644
index 000000000000..3713fe118eaa
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/SymbolRecordHelpers.h
@@ -0,0 +1,62 @@
+//===- SymbolRecordHelpers.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDHELPERS_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDHELPERS_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+
+namespace llvm {
+namespace codeview {
+/// Return true if this symbol opens a scope. This implies that the symbol has
+/// "parent" and "end" fields, which contain the offset of the S_END or
+/// S_INLINESITE_END record.
+inline bool symbolOpensScope(SymbolKind Kind) {
+ switch (Kind) {
+ case SymbolKind::S_GPROC32:
+ case SymbolKind::S_LPROC32:
+ case SymbolKind::S_LPROC32_ID:
+ case SymbolKind::S_GPROC32_ID:
+ case SymbolKind::S_BLOCK32:
+ case SymbolKind::S_SEPCODE:
+ case SymbolKind::S_THUNK32:
+ case SymbolKind::S_INLINESITE:
+ case SymbolKind::S_INLINESITE2:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/// Return true if this symbol ends a scope.
+inline bool symbolEndsScope(SymbolKind Kind) {
+ switch (Kind) {
+ case SymbolKind::S_END:
+ case SymbolKind::S_PROC_ID_END:
+ case SymbolKind::S_INLINESITE_END:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/// Given a symbol P for which symbolOpensScope(P) == true, return the
+/// corresponding end offset.
+uint32_t getScopeEndOffset(const CVSymbol &Symbol);
+uint32_t getScopeParentOffset(const CVSymbol &Symbol);
+
+CVSymbolArray limitSymbolArrayToScope(const CVSymbolArray &Symbols,
+ uint32_t ScopeBegin);
+
+} // namespace codeview
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h
index c71281de7145..58463a6b13df 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -134,6 +134,8 @@ public:
return static_cast<SimpleTypeMode>(Index & SimpleModeMask);
}
+ TypeIndex makeDirect() const { return TypeIndex{getSimpleKind()}; }
+
static TypeIndex None() { return TypeIndex(SimpleTypeKind::None); }
static TypeIndex Void() { return TypeIndex(SimpleTypeKind::Void); }
static TypeIndex VoidPointer32() {
@@ -143,6 +145,13 @@ public:
return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer64);
}
+ static TypeIndex NullptrT() {
+ // std::nullptr_t uses the pointer mode that doesn't indicate bit-width,
+ // presumably because std::nullptr_t is intended to be compatible with any
+ // pointer type.
+ return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer);
+ }
+
static TypeIndex SignedCharacter() {
return TypeIndex(SimpleTypeKind::SignedCharacter);
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
index 61ebdf878ce7..7b4a30ee622d 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -95,6 +95,11 @@ struct MemberAttributes {
return MP == MethodKind::IntroducingVirtual ||
MP == MethodKind::PureIntroducingVirtual;
}
+
+ /// Is this method static.
+ bool isStatic() const {
+ return getMethodKind() == MethodKind::Static;
+ }
};
// Does not correspond to any tag, this is the tail of an LF_POINTER record
@@ -264,14 +269,18 @@ public:
// LF_POINTER
class PointerRecord : public TypeRecord {
public:
+ // ---------------------------XXXXX
static const uint32_t PointerKindShift = 0;
static const uint32_t PointerKindMask = 0x1F;
+ // ------------------------XXX-----
static const uint32_t PointerModeShift = 5;
static const uint32_t PointerModeMask = 0x07;
- static const uint32_t PointerOptionMask = 0xFF;
+ // ----------XXX------XXXXX--------
+ static const uint32_t PointerOptionMask = 0x381f00;
+  // -----------XXXXXXXX-------------
static const uint32_t PointerSizeShift = 13;
static const uint32_t PointerSizeMask = 0xFF;
@@ -305,7 +314,7 @@ public:
}
PointerOptions getOptions() const {
- return static_cast<PointerOptions>(Attrs);
+ return static_cast<PointerOptions>(Attrs & PointerOptionMask);
}
uint8_t getSize() const {
@@ -334,6 +343,14 @@ public:
return !!(Attrs & uint32_t(PointerOptions::Restrict));
}
+ bool isLValueReferenceThisPtr() const {
+ return !!(Attrs & uint32_t(PointerOptions::LValueRefThisPointer));
+ }
+
+ bool isRValueReferenceThisPtr() const {
+ return !!(Attrs & uint32_t(PointerOptions::RValueRefThisPointer));
+ }
+
TypeIndex ReferentType;
uint32_t Attrs;
Optional<MemberPointerInfo> MemberInfo;
@@ -429,6 +446,14 @@ public:
return (Options & ClassOptions::ForwardReference) != ClassOptions::None;
}
+ bool containsNestedClass() const {
+ return (Options & ClassOptions::ContainsNestedClass) != ClassOptions::None;
+ }
+
+ bool isScoped() const {
+ return (Options & ClassOptions::Scoped) != ClassOptions::None;
+ }
+
uint16_t getMemberCount() const { return MemberCount; }
ClassOptions getOptions() const { return Options; }
TypeIndex getFieldList() const { return FieldList; }
@@ -655,7 +680,17 @@ public:
ArrayRef<TypeIndex> getArgs() const { return ArgIndices; }
- SmallVector<TypeIndex, 4> ArgIndices;
+ /// Indices of known build info arguments.
+ enum BuildInfoArg {
+ CurrentDirectory, ///< Absolute CWD path
+ BuildTool, ///< Absolute compiler path
+ SourceFile, ///< Path to main source file, relative or absolute
+ TypeServerPDB, ///< Absolute path of type server PDB (/Fd)
+ CommandLine, ///< Full canonical command line (maybe -cc1)
+ MaxArgs
+ };
+
+ SmallVector<TypeIndex, MaxArgs> ArgIndices;
};
// LF_VFTABLE
@@ -923,6 +958,7 @@ public:
uint32_t Signature;
};
+
} // end namespace codeview
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h
new file mode 100644
index 000000000000..389472ed1aea
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordHelpers.h
@@ -0,0 +1,28 @@
+//===- TypeRecordHelpers.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDHELPERS_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDHELPERS_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+
+namespace llvm {
+ namespace codeview {
+ /// Given an arbitrary codeview type, determine if it is an LF_STRUCTURE,
+ /// LF_CLASS, LF_INTERFACE, LF_UNION, or LF_ENUM with the forward ref class
+ /// option.
+ bool isUdtForwardRef(CVType CVT);
+
+ /// Given a CVType which is assumed to be an LF_MODIFIER, return the
+ /// TypeIndex of the type that the LF_MODIFIER modifies.
+ TypeIndex getModifiedType(const CVType &CVT);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
index 583740d2eb4b..0b9f54ec60bf 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
@@ -83,18 +83,21 @@ Error mergeIdRecords(MergingTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
Error mergeTypeAndIdRecords(MergingTypeTableBuilder &DestIds,
MergingTypeTableBuilder &DestTypes,
SmallVectorImpl<TypeIndex> &SourceToDest,
- const CVTypeArray &IdsAndTypes);
+ const CVTypeArray &IdsAndTypes,
+ Optional<uint32_t> &PCHSignature);
Error mergeTypeAndIdRecords(GlobalTypeTableBuilder &DestIds,
GlobalTypeTableBuilder &DestTypes,
SmallVectorImpl<TypeIndex> &SourceToDest,
const CVTypeArray &IdsAndTypes,
- ArrayRef<GloballyHashedType> Hashes);
+ ArrayRef<GloballyHashedType> Hashes,
+ Optional<uint32_t> &PCHSignature);
Error mergeTypeRecords(GlobalTypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest,
const CVTypeArray &Types,
- ArrayRef<GloballyHashedType> Hashes);
+ ArrayRef<GloballyHashedType> Hashes,
+ Optional<uint32_t> &PCHSignature);
Error mergeIdRecords(GlobalTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
SmallVectorImpl<TypeIndex> &SourceToDest,
diff --git a/contrib/llvm/include/llvm/DebugInfo/DIContext.h b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
index bbdd5e0d9c3f..85e96402a246 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
@@ -81,7 +81,7 @@ class DIInliningInfo {
public:
DIInliningInfo() = default;
- DILineInfo getFrame(unsigned Index) const {
+ const DILineInfo & getFrame(unsigned Index) const {
assert(Index < Frames.size());
return Frames[Index];
}
@@ -98,6 +98,11 @@ public:
void addFrame(const DILineInfo &Frame) {
Frames.push_back(Frame);
}
+
+ void resize(unsigned i) {
+ Frames.resize(i);
+ }
+
};
/// Container for description of a global variable.
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
index c219ca75e640..33797419a7b8 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
@@ -18,20 +18,20 @@ namespace llvm {
class DWARFCompileUnit : public DWARFUnit {
public:
DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
- const DWARFUnitHeader &Header,
- const DWARFDebugAbbrev *DA, const DWARFSection *RS,
+ const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
+ const DWARFSection *RS, const DWARFSection *LocSection,
StringRef SS, const DWARFSection &SOS,
const DWARFSection *AOS, const DWARFSection &LS, bool LE,
- bool IsDWO, const DWARFUnitSectionBase &UnitSection)
- : DWARFUnit(Context, Section, Header, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
- UnitSection) {}
+ bool IsDWO, const DWARFUnitVector &UnitVector)
+ : DWARFUnit(Context, Section, Header, DA, RS, LocSection, SS, SOS, AOS,
+ LS, LE, IsDWO, UnitVector) {}
- // VTable anchor.
+ /// VTable anchor.
~DWARFCompileUnit() override;
-
- void dump(raw_ostream &OS, DIDumpOptions DumpOpts);
-
- static const DWARFSectionKind Section = DW_SECT_INFO;
+ /// Dump this compile unit to \p OS.
+ void dump(raw_ostream &OS, DIDumpOptions DumpOpts) override;
+ /// Enable LLVM-style RTTI.
+ static bool classof(const DWARFUnit *U) { return !U->isTypeUnit(); }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
index f5419fe02421..dbb6be04544b 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -57,8 +57,7 @@ enum class ErrorPolicy { Halt, Continue };
/// This data structure is the top level entity that deals with dwarf debug
/// information parsing. The actual data is supplied through DWARFObj.
class DWARFContext : public DIContext {
- DWARFUnitSection<DWARFCompileUnit> CUs;
- std::deque<DWARFUnitSection<DWARFTypeUnit>> TUs;
+ DWARFUnitVector NormalUnits;
std::unique_ptr<DWARFUnitIndex> CUIndex;
std::unique_ptr<DWARFGdbIndex> GdbIndex;
std::unique_ptr<DWARFUnitIndex> TUIndex;
@@ -75,10 +74,9 @@ class DWARFContext : public DIContext {
std::unique_ptr<AppleAcceleratorTable> AppleNamespaces;
std::unique_ptr<AppleAcceleratorTable> AppleObjC;
- DWARFUnitSection<DWARFCompileUnit> DWOCUs;
- std::deque<DWARFUnitSection<DWARFTypeUnit>> DWOTUs;
+ DWARFUnitVector DWOUnits;
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
- std::unique_ptr<DWARFDebugLocDWO> LocDWO;
+ std::unique_ptr<DWARFDebugLoclists> LocDWO;
/// The maximum DWARF version of all units.
unsigned MaxVersion = 0;
@@ -95,22 +93,17 @@ class DWARFContext : public DIContext {
std::unique_ptr<MCRegisterInfo> RegInfo;
/// Read compile units from the debug_info section (if necessary)
- /// and store them in CUs.
- void parseCompileUnits();
-
- /// Read type units from the debug_types sections (if necessary)
- /// and store them in TUs.
- void parseTypeUnits();
+ /// and type units from the debug_types sections (if necessary)
+ /// and store them in NormalUnits.
+ void parseNormalUnits();
/// Read compile units from the debug_info.dwo section (if necessary)
- /// and store them in DWOCUs.
- void parseDWOCompileUnits();
-
- /// Read type units from the debug_types.dwo section (if necessary)
- /// and store them in DWOTUs.
- void parseDWOTypeUnits();
+ /// and type units from the debug_types.dwo section (if necessary)
+ /// and store them in DWOUnits.
+ /// If \p Lazy is true, set up to parse but don't actually parse them.
+ enum { EagerParse = false, LazyParse = true };
+ void parseDWOUnits(bool Lazy = false);
-protected:
std::unique_ptr<const DWARFObject> DObj;
public:
@@ -139,68 +132,95 @@ public:
bool verify(raw_ostream &OS, DIDumpOptions DumpOpts = {}) override;
- using cu_iterator_range = DWARFUnitSection<DWARFCompileUnit>::iterator_range;
- using tu_iterator_range = DWARFUnitSection<DWARFTypeUnit>::iterator_range;
- using tu_section_iterator_range = iterator_range<decltype(TUs)::iterator>;
+ using unit_iterator_range = DWARFUnitVector::iterator_range;
- /// Get compile units in this context.
- cu_iterator_range compile_units() {
- parseCompileUnits();
- return cu_iterator_range(CUs.begin(), CUs.end());
+ /// Get units from .debug_info in this context.
+ unit_iterator_range info_section_units() {
+ parseNormalUnits();
+ return unit_iterator_range(NormalUnits.begin(),
+ NormalUnits.begin() +
+ NormalUnits.getNumInfoUnits());
}
+ /// Get units from .debug_types in this context.
+ unit_iterator_range types_section_units() {
+ parseNormalUnits();
+ return unit_iterator_range(
+ NormalUnits.begin() + NormalUnits.getNumInfoUnits(), NormalUnits.end());
+ }
+
+ /// Get compile units in this context.
+ unit_iterator_range compile_units() { return info_section_units(); }
+
/// Get type units in this context.
- tu_section_iterator_range type_unit_sections() {
- parseTypeUnits();
- return tu_section_iterator_range(TUs.begin(), TUs.end());
+ unit_iterator_range type_units() { return types_section_units(); }
+
+ /// Get all normal compile/type units in this context.
+ unit_iterator_range normal_units() {
+ parseNormalUnits();
+ return unit_iterator_range(NormalUnits.begin(), NormalUnits.end());
}
- /// Get compile units in the DWO context.
- cu_iterator_range dwo_compile_units() {
- parseDWOCompileUnits();
- return cu_iterator_range(DWOCUs.begin(), DWOCUs.end());
+ /// Get units from .debug_info..dwo in the DWO context.
+ unit_iterator_range dwo_info_section_units() {
+ parseDWOUnits();
+ return unit_iterator_range(DWOUnits.begin(),
+ DWOUnits.begin() + DWOUnits.getNumInfoUnits());
+ }
+
+ /// Get units from .debug_types.dwo in the DWO context.
+ unit_iterator_range dwo_types_section_units() {
+ parseDWOUnits();
+ return unit_iterator_range(DWOUnits.begin() + DWOUnits.getNumInfoUnits(),
+ DWOUnits.end());
}
+ /// Get compile units in the DWO context.
+ unit_iterator_range dwo_compile_units() { return dwo_info_section_units(); }
+
/// Get type units in the DWO context.
- tu_section_iterator_range dwo_type_unit_sections() {
- parseDWOTypeUnits();
- return tu_section_iterator_range(DWOTUs.begin(), DWOTUs.end());
+ unit_iterator_range dwo_type_units() { return dwo_types_section_units(); }
+
+ /// Get all units in the DWO context.
+ unit_iterator_range dwo_units() {
+ parseDWOUnits();
+ return unit_iterator_range(DWOUnits.begin(), DWOUnits.end());
}
/// Get the number of compile units in this context.
unsigned getNumCompileUnits() {
- parseCompileUnits();
- return CUs.size();
+ parseNormalUnits();
+ return NormalUnits.getNumInfoUnits();
}
- /// Get the number of compile units in this context.
+ /// Get the number of type units in this context.
unsigned getNumTypeUnits() {
- parseTypeUnits();
- return TUs.size();
+ parseNormalUnits();
+ return NormalUnits.getNumTypesUnits();
}
/// Get the number of compile units in the DWO context.
unsigned getNumDWOCompileUnits() {
- parseDWOCompileUnits();
- return DWOCUs.size();
+ parseDWOUnits();
+ return DWOUnits.getNumInfoUnits();
}
- /// Get the number of compile units in the DWO context.
+ /// Get the number of type units in the DWO context.
unsigned getNumDWOTypeUnits() {
- parseDWOTypeUnits();
- return DWOTUs.size();
+ parseDWOUnits();
+ return DWOUnits.getNumTypesUnits();
}
- /// Get the compile unit at the specified index for this compile unit.
- DWARFCompileUnit *getCompileUnitAtIndex(unsigned index) {
- parseCompileUnits();
- return CUs[index].get();
+ /// Get the unit at the specified index.
+ DWARFUnit *getUnitAtIndex(unsigned index) {
+ parseNormalUnits();
+ return NormalUnits[index].get();
}
- /// Get the compile unit at the specified index for the DWO compile units.
- DWARFCompileUnit *getDWOCompileUnitAtIndex(unsigned index) {
- parseDWOCompileUnits();
- return DWOCUs[index].get();
+ /// Get the unit at the specified index for the DWO units.
+ DWARFUnit *getDWOUnitAtIndex(unsigned index) {
+ parseDWOUnits();
+ return DWOUnits[index].get();
}
DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
@@ -211,7 +231,17 @@ public:
/// Get a DIE given an exact offset.
DWARFDie getDIEForOffset(uint32_t Offset);
- unsigned getMaxVersion() const { return MaxVersion; }
+ unsigned getMaxVersion() {
+ // Ensure info units have been parsed to discover MaxVersion
+ info_section_units();
+ return MaxVersion;
+ }
+
+ unsigned getMaxDWOVersion() {
+ // Ensure DWO info units have been parsed to discover MaxVersion
+ dwo_info_section_units();
+ return MaxVersion;
+ }
void setMaxVersionIfGreater(unsigned Version) {
if (Version > MaxVersion)
@@ -232,7 +262,7 @@ public:
const DWARFDebugAbbrev *getDebugAbbrevDWO();
/// Get a pointer to the parsed DebugLoc object.
- const DWARFDebugLocDWO *getDebugLocDWO();
+ const DWARFDebugLoclists *getDebugLocDWO();
/// Get a pointer to the parsed DebugAranges object.
const DWARFDebugAranges *getDebugAranges();
@@ -327,6 +357,13 @@ public:
/// TODO: refactor compile_units() to make this const.
uint8_t getCUAddrSize();
+ /// Dump Error as warning message to stderr.
+ static void dumpWarning(Error Warning);
+
+ Triple::ArchType getArch() const {
+ return getDWARFObj().getFile()->getArch();
+ }
+
private:
/// Return the compile unit which contains instruction with provided
/// address.
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
index ff1c7fb38389..7dc07d774aba 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFExpression.h"
#include "llvm/Support/Error.h"
@@ -59,9 +60,11 @@ public:
unsigned size() const { return (unsigned)Instructions.size(); }
bool empty() const { return Instructions.empty(); }
- CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor)
+ CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor,
+ Triple::ArchType Arch)
: CodeAlignmentFactor(CodeAlignmentFactor),
- DataAlignmentFactor(DataAlignmentFactor) {}
+ DataAlignmentFactor(DataAlignmentFactor),
+ Arch(Arch) {}
/// Parse and store a sequence of CFI instructions from Data,
/// starting at *Offset and ending at EndOffset. *Offset is updated
@@ -76,6 +79,7 @@ private:
std::vector<Instruction> Instructions;
const uint64_t CodeAlignmentFactor;
const int64_t DataAlignmentFactor;
+ Triple::ArchType Arch;
/// Convenience method to add a new instruction with the given opcode.
void addInstruction(uint8_t Opcode) {
@@ -130,8 +134,9 @@ public:
enum FrameKind { FK_CIE, FK_FDE };
FrameEntry(FrameKind K, uint64_t Offset, uint64_t Length, uint64_t CodeAlign,
- int64_t DataAlign)
- : Kind(K), Offset(Offset), Length(Length), CFIs(CodeAlign, DataAlign) {}
+ int64_t DataAlign, Triple::ArchType Arch)
+ : Kind(K), Offset(Offset), Length(Length),
+ CFIs(CodeAlign, DataAlign, Arch) {}
virtual ~FrameEntry() {}
@@ -168,9 +173,9 @@ public:
int64_t DataAlignmentFactor, uint64_t ReturnAddressRegister,
SmallString<8> AugmentationData, uint32_t FDEPointerEncoding,
uint32_t LSDAPointerEncoding, Optional<uint64_t> Personality,
- Optional<uint32_t> PersonalityEnc)
+ Optional<uint32_t> PersonalityEnc, Triple::ArchType Arch)
: FrameEntry(FK_CIE, Offset, Length, CodeAlignmentFactor,
- DataAlignmentFactor),
+ DataAlignmentFactor, Arch),
Version(Version), Augmentation(std::move(Augmentation)),
AddressSize(AddressSize), SegmentDescriptorSize(SegmentDescriptorSize),
CodeAlignmentFactor(CodeAlignmentFactor),
@@ -224,10 +229,11 @@ public:
// is obtained lazily once it's actually required.
FDE(uint64_t Offset, uint64_t Length, int64_t LinkedCIEOffset,
uint64_t InitialLocation, uint64_t AddressRange, CIE *Cie,
- Optional<uint64_t> LSDAAddress)
+ Optional<uint64_t> LSDAAddress, Triple::ArchType Arch)
: FrameEntry(FK_FDE, Offset, Length,
Cie ? Cie->getCodeAlignmentFactor() : 0,
- Cie ? Cie->getDataAlignmentFactor() : 0),
+ Cie ? Cie->getDataAlignmentFactor() : 0,
+ Arch),
LinkedCIEOffset(LinkedCIEOffset), InitialLocation(InitialLocation),
AddressRange(AddressRange), LinkedCIE(Cie), LSDAAddress(LSDAAddress) {}
@@ -256,6 +262,7 @@ private:
/// A parsed .debug_frame or .eh_frame section
class DWARFDebugFrame {
+ const Triple::ArchType Arch;
// True if this is parsing an eh_frame section.
const bool IsEH;
// Not zero for sane pointer values coming out of eh_frame
@@ -272,7 +279,8 @@ public:
// it is a .debug_frame section. EHFrameAddress should be different
// than zero for correct parsing of .eh_frame addresses when they
// use a PC-relative encoding.
- DWARFDebugFrame(bool IsEH = false, uint64_t EHFrameAddress = 0);
+ DWARFDebugFrame(Triple::ArchType Arch,
+ bool IsEH = false, uint64_t EHFrameAddress = 0);
~DWARFDebugFrame();
/// Dump the section data into the given stream.
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
index 5b2af34bbcf5..d50af5a057f1 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
@@ -247,10 +247,11 @@ public:
void clear();
/// Parse prologue and all rows.
- Error parse(DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr,
- const DWARFContext &Ctx, const DWARFUnit *U,
- std::function<void(Error)> RecoverableErrorCallback = warn,
- raw_ostream *OS = nullptr);
+ Error parse(
+ DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr,
+ const DWARFContext &Ctx, const DWARFUnit *U,
+ std::function<void(Error)> RecoverableErrorCallback,
+ raw_ostream *OS = nullptr);
using RowVector = std::vector<Row>;
using RowIter = RowVector::const_iterator;
@@ -273,14 +274,13 @@ public:
Expected<const LineTable *> getOrParseLineTable(
DWARFDataExtractor &DebugLineData, uint32_t Offset,
const DWARFContext &Ctx, const DWARFUnit *U,
- std::function<void(Error)> RecoverableErrorCallback = warn);
+ std::function<void(Error)> RecoverableErrorCallback);
/// Helper to allow for parsing of an entire .debug_line section in sequence.
class SectionParser {
public:
- using cu_range = DWARFUnitSection<DWARFCompileUnit>::iterator_range;
- using tu_range =
- iterator_range<std::deque<DWARFUnitSection<DWARFTypeUnit>>::iterator>;
+ using cu_range = DWARFUnitVector::iterator_range;
+ using tu_range = DWARFUnitVector::iterator_range;
using LineToUnitMap = std::map<uint64_t, DWARFUnit *>;
SectionParser(DWARFDataExtractor &Data, const DWARFContext &C, cu_range CUs,
@@ -296,16 +296,17 @@ public:
/// \param OS - if not null, the parser will print information about the
/// table as it parses it.
LineTable
- parseNext(function_ref<void(Error)> RecoverableErrorCallback = warn,
- function_ref<void(Error)> UnrecoverableErrorCallback = warn,
- raw_ostream *OS = nullptr);
+ parseNext(
+ function_ref<void(Error)> RecoverableErrorCallback,
+ function_ref<void(Error)> UnrecoverableErrorCallback,
+ raw_ostream *OS = nullptr);
/// Skip the current line table and go to the following line table (if
/// present) immediately.
///
/// \param ErrorCallback - report any prologue parsing issues via this
/// callback.
- void skip(function_ref<void(Error)> ErrorCallback = warn);
+ void skip(function_ref<void(Error)> ErrorCallback);
/// Indicates if the parser has parsed as much as possible.
///
@@ -328,12 +329,6 @@ public:
bool Done = false;
};
- /// Helper function for DWARFDebugLine parse functions, to report issues
- /// identified during parsing.
- ///
- /// \param Err The Error to report.
- static void warn(Error Err);
-
private:
struct ParsingState {
ParsingState(struct LineTable *LT);
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
index 9a73745fb6b4..da2098e15402 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
@@ -73,19 +73,21 @@ public:
uint32_t *Offset);
};
-class DWARFDebugLocDWO {
+class DWARFDebugLoclists {
public:
struct Entry {
- uint64_t Start;
- uint32_t Length;
+ uint8_t Kind;
+ uint64_t Value0;
+ uint64_t Value1;
SmallVector<char, 4> Loc;
};
struct LocationList {
unsigned Offset;
SmallVector<Entry, 2> Entries;
- void dump(raw_ostream &OS, bool IsLittleEndian, unsigned AddressSize,
- const MCRegisterInfo *RegInfo, unsigned Indent) const;
+ void dump(raw_ostream &OS, uint64_t BaseAddr, bool IsLittleEndian,
+ unsigned AddressSize, const MCRegisterInfo *RegInfo,
+ unsigned Indent) const;
};
private:
@@ -98,15 +100,15 @@ private:
bool IsLittleEndian;
public:
- void parse(DataExtractor data);
- void dump(raw_ostream &OS, const MCRegisterInfo *RegInfo,
+ void parse(DataExtractor data, unsigned Version);
+ void dump(raw_ostream &OS, uint64_t BaseAddr, const MCRegisterInfo *RegInfo,
Optional<uint64_t> Offset) const;
/// Return the location list at the given offset or nullptr.
LocationList const *getLocationListAtOffset(uint64_t Offset) const;
- static Optional<LocationList> parseOneLocationList(DataExtractor Data,
- uint32_t *Offset);
+ static Optional<LocationList>
+ parseOneLocationList(DataExtractor Data, unsigned *Offset, unsigned Version);
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
index cae4804e61d3..9e1656eb1615 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFObject.h"
#include <cstdint>
#include <vector>
@@ -67,7 +68,8 @@ private:
bool GnuStyle;
public:
- DWARFDebugPubTable(StringRef Data, bool LittleEndian, bool GnuStyle);
+ DWARFDebugPubTable(const DWARFObject &Obj, const DWARFSection &Sec,
+ bool LittleEndian, bool GnuStyle);
void dump(raw_ostream &OS) const;
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
index ce7436d9faa3..bc26edf00647 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
@@ -18,7 +18,6 @@
namespace llvm {
-struct BaseAddress;
class raw_ostream;
class DWARFDebugRangeList {
@@ -78,7 +77,7 @@ public:
/// list. Has to be passed base address of the compile unit referencing this
/// range list.
DWARFAddressRangesVector
- getAbsoluteRanges(llvm::Optional<BaseAddress> BaseAddr) const;
+ getAbsoluteRanges(llvm::Optional<SectionedAddress> BaseAddr) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h
index e2e8ab5ed219..5cc8d789e598 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRnglists.h
@@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_DWARFDEBUGRNGLISTS_H
#define LLVM_DEBUGINFO_DWARFDEBUGRNGLISTS_H
+#include "llvm/ADT/Optional.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
@@ -23,6 +24,7 @@ namespace llvm {
class Error;
class raw_ostream;
+class DWARFUnit;
/// A class representing a single range list entry.
struct RangeListEntry : public DWARFListEntryBase {
@@ -35,7 +37,9 @@ struct RangeListEntry : public DWARFListEntryBase {
Error extract(DWARFDataExtractor Data, uint32_t End, uint32_t *OffsetPtr);
void dump(raw_ostream &OS, uint8_t AddrSize, uint8_t MaxEncodingStringLength,
- uint64_t &CurrentBase, DIDumpOptions DumpOpts) const;
+ uint64_t &CurrentBase, DIDumpOptions DumpOpts,
+ llvm::function_ref<Optional<SectionedAddress>(uint32_t)>
+ LookupPooledAddress) const;
bool isSentinel() const { return EntryKind == dwarf::DW_RLE_end_of_list; }
};
@@ -44,7 +48,8 @@ class DWARFDebugRnglist : public DWARFListType<RangeListEntry> {
public:
/// Build a DWARFAddressRangesVector from a rangelist.
DWARFAddressRangesVector
- getAbsoluteRanges(llvm::Optional<BaseAddress> BaseAddr) const;
+ getAbsoluteRanges(llvm::Optional<SectionedAddress> BaseAddr,
+ DWARFUnit &U) const;
};
class DWARFDebugRnglistTable : public DWARFListTableBase<DWARFDebugRnglist> {
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
index c77034f6348f..56d46cd739a2 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -180,6 +180,7 @@ public:
/// \returns a valid DWARFDie instance if the attribute exists, or an invalid
/// DWARFDie object if it doesn't.
DWARFDie getAttributeValueAsReferencedDie(dwarf::Attribute Attr) const;
+ DWARFDie getAttributeValueAsReferencedDie(const DWARFFormValue &V) const;
/// Extract the range base attribute from this DIE as absolute section offset.
///
@@ -404,6 +405,10 @@ public:
Die = Die.getPreviousSibling();
}
+ llvm::DWARFDie::iterator base() const {
+ return llvm::DWARFDie::iterator(AtEnd ? Die : Die.getSibling());
+ }
+
reverse_iterator<llvm::DWARFDie::iterator> &operator++() {
assert(!AtEnd && "Incrementing rend");
llvm::DWARFDie D = Die.getPreviousSibling();
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index 1b5f71c946f9..727e853c09fb 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -61,7 +61,6 @@ public:
dwarf::Form getForm() const { return Form; }
uint64_t getRawUValue() const { return Value.uval; }
- uint64_t getSectionIndex() const { return Value.SectionIndex; }
void setForm(dwarf::Form F) { Form = F; }
void setUValue(uint64_t V) { Value.uval = V; }
void setSValue(int64_t V) { Value.sval = V; }
@@ -75,6 +74,10 @@ public:
bool isFormClass(FormClass FC) const;
const DWARFUnit *getUnit() const { return U; }
void dump(raw_ostream &OS, DIDumpOptions DumpOpts = DIDumpOptions()) const;
+ void dumpSectionedAddress(raw_ostream &OS, DIDumpOptions DumpOpts,
+ SectionedAddress SA) const;
+ static void dumpAddressSection(const DWARFObject &Obj, raw_ostream &OS,
+ DIDumpOptions DumpOpts, uint64_t SectionIndex);
/// Extracts a value in \p Data at offset \p *OffsetPtr. The information
/// in \p FormParams is needed to interpret some forms. The optional
@@ -101,6 +104,7 @@ public:
Optional<int64_t> getAsSignedConstant() const;
Optional<const char *> getAsCString() const;
Optional<uint64_t> getAsAddress() const;
+ Optional<SectionedAddress> getAsSectionedAddress() const;
Optional<uint64_t> getAsSectionOffset() const;
Optional<ArrayRef<uint8_t>> getAsBlock() const;
Optional<uint64_t> getAsCStringOffset() const;
@@ -238,6 +242,13 @@ inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue> &V) {
return None;
}
+inline Optional<SectionedAddress>
+toSectionedAddress(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsSectionedAddress();
+ return None;
+}
+
/// Take an optional DWARFFormValue and extract a address.
///
/// \param V and optional DWARFFormValue to attempt to extract the value from.
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
index 8d1ac5c83c23..073e02903c39 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
@@ -24,6 +24,7 @@ class DWARFGdbIndex {
uint32_t Version;
uint32_t CuListOffset;
+ uint32_t TuListOffset;
uint32_t AddressAreaOffset;
uint32_t SymbolTableOffset;
uint32_t ConstantPoolOffset;
@@ -34,6 +35,13 @@ class DWARFGdbIndex {
};
SmallVector<CompUnitEntry, 0> CuList;
+ struct TypeUnitEntry {
+ uint64_t Offset;
+ uint64_t TypeOffset;
+ uint64_t TypeSignature;
+ };
+ SmallVector<TypeUnitEntry, 0> TuList;
+
struct AddressEntry {
uint64_t LowAddress; /// The low address.
uint64_t HighAddress; /// The high address.
@@ -55,6 +63,7 @@ class DWARFGdbIndex {
uint32_t StringPoolOffset;
void dumpCUList(raw_ostream &OS) const;
+ void dumpTUList(raw_ostream &OS) const;
void dumpAddressArea(raw_ostream &OS) const;
void dumpSymbolTable(raw_ostream &OS) const;
void dumpConstantPool(raw_ostream &OS) const;
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFListTable.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFListTable.h
index ab12f3bc08b0..9b987314f209 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFListTable.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFListTable.h
@@ -13,6 +13,7 @@
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -43,10 +44,6 @@ protected:
ListEntries Entries;
public:
- // FIXME: We need to consolidate the various verions of "createError"
- // that are used in the DWARF consumer. Until then, this is a workaround.
- Error createError(const char *, const char *, uint32_t);
-
const ListEntries &getEntries() const { return Entries; }
bool empty() const { return Entries.empty(); }
void clear() { Entries.clear(); }
@@ -102,6 +99,7 @@ public:
uint32_t getHeaderOffset() const { return HeaderOffset; }
uint8_t getAddrSize() const { return HeaderData.AddrSize; }
uint32_t getLength() const { return HeaderData.Length; }
+ uint16_t getVersion() const { return HeaderData.Version; }
StringRef getSectionName() const { return SectionName; }
StringRef getListTypeString() const { return ListTypeString; }
dwarf::DwarfFormat getFormat() const { return Format; }
@@ -159,7 +157,10 @@ public:
uint32_t getHeaderOffset() const { return Header.getHeaderOffset(); }
uint8_t getAddrSize() const { return Header.getAddrSize(); }
- void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {}) const;
+ void dump(raw_ostream &OS,
+ llvm::function_ref<Optional<SectionedAddress>(uint32_t)>
+ LookupPooledAddress,
+ DIDumpOptions DumpOpts = {}) const;
/// Return the contents of the offset entry designated by a given index.
Optional<uint32_t> getOffsetEntry(uint32_t Index) const {
@@ -213,7 +214,8 @@ Error DWARFListType<ListEntryType>::extract(DWARFDataExtractor Data,
StringRef SectionName,
StringRef ListTypeString) {
if (*OffsetPtr < HeaderOffset || *OffsetPtr >= End)
- return createError("invalid %s list offset 0x%" PRIx32,
+ return createStringError(errc::invalid_argument,
+ "invalid %s list offset 0x%" PRIx32,
ListTypeString.data(), *OffsetPtr);
Entries.clear();
while (*OffsetPtr < End) {
@@ -224,14 +226,18 @@ Error DWARFListType<ListEntryType>::extract(DWARFDataExtractor Data,
if (Entry.isSentinel())
return Error::success();
}
- return createError("no end of list marker detected at end of %s table "
+ return createStringError(errc::illegal_byte_sequence,
+ "no end of list marker detected at end of %s table "
"starting at offset 0x%" PRIx32,
SectionName.data(), HeaderOffset);
}
template <typename DWARFListType>
-void DWARFListTableBase<DWARFListType>::dump(raw_ostream &OS,
- DIDumpOptions DumpOpts) const {
+void DWARFListTableBase<DWARFListType>::dump(
+ raw_ostream &OS,
+ llvm::function_ref<Optional<SectionedAddress>(uint32_t)>
+ LookupPooledAddress,
+ DIDumpOptions DumpOpts) const {
Header.dump(OS, DumpOpts);
OS << HeaderString << "\n";
@@ -250,7 +256,7 @@ void DWARFListTableBase<DWARFListType>::dump(raw_ostream &OS,
for (const auto &List : ListMap)
for (const auto &Entry : List.second.getEntries())
Entry.dump(OS, getAddrSize(), MaxEncodingStringLength, CurrentBase,
- DumpOpts);
+ DumpOpts, LookupPooledAddress);
}
template <typename DWARFListType>
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFObject.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFObject.h
index 6e8f370f4aea..d611b5d075c8 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFObject.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFObject.h
@@ -33,11 +33,13 @@ public:
virtual ArrayRef<SectionName> getSectionNames() const { return {}; }
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const { llvm_unreachable("unimplemented"); }
- virtual const DWARFSection &getInfoSection() const { return Dummy; }
+ virtual void
+ forEachInfoSections(function_ref<void(const DWARFSection &)> F) const {}
virtual void
forEachTypesSections(function_ref<void(const DWARFSection &)> F) const {}
virtual StringRef getAbbrevSection() const { return ""; }
virtual const DWARFSection &getLocSection() const { return Dummy; }
+ virtual const DWARFSection &getLoclistsSection() const { return Dummy; }
virtual StringRef getARangeSection() const { return ""; }
virtual StringRef getDebugFrameSection() const { return ""; }
virtual StringRef getEHFrameSection() const { return ""; }
@@ -47,12 +49,13 @@ public:
virtual const DWARFSection &getRangeSection() const { return Dummy; }
virtual const DWARFSection &getRnglistsSection() const { return Dummy; }
virtual StringRef getMacinfoSection() const { return ""; }
- virtual StringRef getPubNamesSection() const { return ""; }
- virtual StringRef getPubTypesSection() const { return ""; }
- virtual StringRef getGnuPubNamesSection() const { return ""; }
- virtual StringRef getGnuPubTypesSection() const { return ""; }
+ virtual const DWARFSection &getPubNamesSection() const { return Dummy; }
+ virtual const DWARFSection &getPubTypesSection() const { return Dummy; }
+ virtual const DWARFSection &getGnuPubNamesSection() const { return Dummy; }
+ virtual const DWARFSection &getGnuPubTypesSection() const { return Dummy; }
virtual const DWARFSection &getStringOffsetSection() const { return Dummy; }
- virtual const DWARFSection &getInfoDWOSection() const { return Dummy; }
+ virtual void
+ forEachInfoDWOSections(function_ref<void(const DWARFSection &)> F) const {}
virtual void
forEachTypesDWOSections(function_ref<void(const DWARFSection &)> F) const {}
virtual StringRef getAbbrevDWOSection() const { return ""; }
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h
index 77045f0794ae..7f8235965297 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h
@@ -23,6 +23,11 @@ struct SectionName {
bool IsNameUnique;
};
+struct SectionedAddress {
+ uint64_t Address;
+ uint64_t SectionIndex;
+};
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
index cb5a78ee3dbf..8ca5ba13fc23 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
@@ -26,19 +26,20 @@ class raw_ostream;
class DWARFTypeUnit : public DWARFUnit {
public:
DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
- const DWARFUnitHeader &Header,
- const DWARFDebugAbbrev *DA, const DWARFSection *RS,
+ const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
+ const DWARFSection *RS, const DWARFSection *LocSection,
StringRef SS, const DWARFSection &SOS, const DWARFSection *AOS,
const DWARFSection &LS, bool LE, bool IsDWO,
- const DWARFUnitSectionBase &UnitSection)
- : DWARFUnit(Context, Section, Header, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
- UnitSection) {}
+ const DWARFUnitVector &UnitVector)
+ : DWARFUnit(Context, Section, Header, DA, RS, LocSection, SS, SOS, AOS,
+ LS, LE, IsDWO, UnitVector) {}
uint64_t getTypeHash() const { return getHeader().getTypeHash(); }
uint32_t getTypeOffset() const { return getHeader().getTypeOffset(); }
- void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {});
- static const DWARFSectionKind Section = DW_SECT_TYPES;
+ void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {}) override;
+ // Enable LLVM-style RTTI.
+ static bool classof(const DWARFUnit *U) { return U->isTypeUnit(); }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 988a7958184c..79c3ce1106d5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -72,7 +72,8 @@ public:
/// Parse a unit header from \p debug_info starting at \p offset_ptr.
bool extract(DWARFContext &Context, const DWARFDataExtractor &debug_info,
uint32_t *offset_ptr, DWARFSectionKind Kind = DW_SECT_INFO,
- const DWARFUnitIndex *Index = nullptr);
+ const DWARFUnitIndex *Index = nullptr,
+ const DWARFUnitIndex::Entry *Entry = nullptr);
uint32_t getOffset() const { return Offset; }
const dwarf::FormParams &getFormParams() const { return FormParams; }
uint16_t getVersion() const { return FormParams.Version; }
@@ -101,133 +102,66 @@ public:
uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
};
-/// Base class for all DWARFUnitSection classes. This provides the
-/// functionality common to all unit types.
-class DWARFUnitSectionBase {
-public:
- /// Returns the Unit that contains the given section offset in the
- /// same section this Unit originated from.
- virtual DWARFUnit *getUnitForOffset(uint32_t Offset) const = 0;
- virtual DWARFUnit *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E) = 0;
-
- void parse(DWARFContext &C, const DWARFSection &Section);
- void parseDWO(DWARFContext &C, const DWARFSection &DWOSection,
- bool Lazy = false);
-
-protected:
- ~DWARFUnitSectionBase() = default;
-
- virtual void parseImpl(DWARFContext &Context, const DWARFObject &Obj,
- const DWARFSection &Section,
- const DWARFDebugAbbrev *DA, const DWARFSection *RS,
- StringRef SS, const DWARFSection &SOS,
- const DWARFSection *AOS, const DWARFSection &LS,
- bool isLittleEndian, bool isDWO, bool Lazy) = 0;
-};
-
const DWARFUnitIndex &getDWARFUnitIndex(DWARFContext &Context,
DWARFSectionKind Kind);
-/// Concrete instance of DWARFUnitSection, specialized for one Unit type.
-template<typename UnitType>
-class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>,
- public DWARFUnitSectionBase {
- bool Parsed = false;
- std::function<std::unique_ptr<UnitType>(uint32_t)> Parser;
+/// Describe a collection of units. Intended to hold all units either from
+/// .debug_info and .debug_types, or from .debug_info.dwo and .debug_types.dwo.
+class DWARFUnitVector final : public SmallVector<std::unique_ptr<DWARFUnit>, 1> {
+ std::function<std::unique_ptr<DWARFUnit>(uint32_t, DWARFSectionKind,
+ const DWARFSection *,
+ const DWARFUnitIndex::Entry *)>
+ Parser;
+ int NumInfoUnits = -1;
public:
- using UnitVector = SmallVectorImpl<std::unique_ptr<UnitType>>;
+ using UnitVector = SmallVectorImpl<std::unique_ptr<DWARFUnit>>;
using iterator = typename UnitVector::iterator;
using iterator_range = llvm::iterator_range<typename UnitVector::iterator>;
- UnitType *getUnitForOffset(uint32_t Offset) const override {
- auto *CU = std::upper_bound(
- this->begin(), this->end(), Offset,
- [](uint32_t LHS, const std::unique_ptr<UnitType> &RHS) {
- return LHS < RHS->getNextUnitOffset();
- });
- if (CU != this->end() && (*CU)->getOffset() <= Offset)
- return CU->get();
- return nullptr;
- }
- UnitType *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E) override {
- const auto *CUOff = E.getOffset(DW_SECT_INFO);
- if (!CUOff)
- return nullptr;
-
- auto Offset = CUOff->Offset;
-
- auto *CU = std::upper_bound(
- this->begin(), this->end(), CUOff->Offset,
- [](uint32_t LHS, const std::unique_ptr<UnitType> &RHS) {
- return LHS < RHS->getNextUnitOffset();
- });
- if (CU != this->end() && (*CU)->getOffset() <= Offset)
- return CU->get();
-
- if (!Parser)
- return nullptr;
-
- auto U = Parser(Offset);
- if (!U)
- U = nullptr;
-
- auto *NewCU = U.get();
- this->insert(CU, std::move(U));
- return NewCU;
+ DWARFUnit *getUnitForOffset(uint32_t Offset) const;
+ DWARFUnit *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E);
+
+ /// Read units from a .debug_info or .debug_types section. Calls made
+ /// before finishedInfoUnits() are assumed to be for .debug_info sections,
+ /// calls after finishedInfoUnits() are for .debug_types sections. Caller
+ /// must not mix calls to addUnitsForSection and addUnitsForDWOSection.
+ void addUnitsForSection(DWARFContext &C, const DWARFSection &Section,
+ DWARFSectionKind SectionKind);
+ /// Read units from a .debug_info.dwo or .debug_types.dwo section. Calls
+ /// made before finishedInfoUnits() are assumed to be for .debug_info.dwo
+ /// sections, calls after finishedInfoUnits() are for .debug_types.dwo
+ /// sections. Caller must not mix calls to addUnitsForSection and
+ /// addUnitsForDWOSection.
+ void addUnitsForDWOSection(DWARFContext &C, const DWARFSection &DWOSection,
+ DWARFSectionKind SectionKind, bool Lazy = false);
+
+ /// Add an existing DWARFUnit to this UnitVector. This is used by the DWARF
+ /// verifier to process unit separately.
+ DWARFUnit *addUnit(std::unique_ptr<DWARFUnit> Unit);
+
+ /// Returns number of all units held by this instance.
+ unsigned getNumUnits() const { return size(); }
+ /// Returns number of units from all .debug_info[.dwo] sections.
+ unsigned getNumInfoUnits() const {
+ return NumInfoUnits == -1 ? size() : NumInfoUnits;
}
+ /// Returns number of units from all .debug_types[.dwo] sections.
+ unsigned getNumTypesUnits() const { return size() - NumInfoUnits; }
+ /// Indicate that parsing .debug_info[.dwo] is done, and remaining units
+ /// will be from .debug_types[.dwo].
+ void finishedInfoUnits() { NumInfoUnits = size(); }
private:
- void parseImpl(DWARFContext &Context, const DWARFObject &Obj,
- const DWARFSection &Section, const DWARFDebugAbbrev *DA,
- const DWARFSection *RS, StringRef SS, const DWARFSection &SOS,
- const DWARFSection *AOS, const DWARFSection &LS, bool LE,
- bool IsDWO, bool Lazy) override {
- if (Parsed)
- return;
- DWARFDataExtractor Data(Obj, Section, LE, 0);
- if (!Parser) {
- const DWARFUnitIndex *Index = nullptr;
- if (IsDWO)
- Index = &getDWARFUnitIndex(Context, UnitType::Section);
- Parser = [=, &Context, &Section, &SOS,
- &LS](uint32_t Offset) -> std::unique_ptr<UnitType> {
- if (!Data.isValidOffset(Offset))
- return nullptr;
- DWARFUnitHeader Header;
- if (!Header.extract(Context, Data, &Offset, UnitType::Section, Index))
- return nullptr;
- auto U = llvm::make_unique<UnitType>(
- Context, Section, Header, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
- *this);
- return U;
- };
- }
- if (Lazy)
- return;
- auto I = this->begin();
- uint32_t Offset = 0;
- while (Data.isValidOffset(Offset)) {
- if (I != this->end() && (*I)->getOffset() == Offset) {
- ++I;
- continue;
- }
- auto U = Parser(Offset);
- if (!U)
- break;
- Offset = U->getNextUnitOffset();
- I = std::next(this->insert(I, std::move(U)));
- }
- Parsed = true;
- }
+ void addUnitsImpl(DWARFContext &Context, const DWARFObject &Obj,
+ const DWARFSection &Section, const DWARFDebugAbbrev *DA,
+ const DWARFSection *RS, const DWARFSection *LocSection,
+ StringRef SS, const DWARFSection &SOS,
+ const DWARFSection *AOS, const DWARFSection &LS, bool LE,
+ bool IsDWO, bool Lazy, DWARFSectionKind SectionKind);
};
/// Represents base address of the CU.
-struct BaseAddress {
- uint64_t Address;
- uint64_t SectionIndex;
-};
-
/// Represents a unit's contribution to the string offsets table.
struct StrOffsetsContributionDescriptor {
uint64_t Base = 0;
@@ -261,14 +195,20 @@ class DWARFUnit {
const DWARFDebugAbbrev *Abbrev;
const DWARFSection *RangeSection;
uint32_t RangeSectionBase;
+ /// We either keep track of the location list section or its data, depending
+ /// on whether we are handling a split DWARF section or not.
+ union {
+ const DWARFSection *LocSection;
+ StringRef LocSectionData;
+ };
const DWARFSection &LineSection;
StringRef StringSection;
const DWARFSection &StringOffsetSection;
const DWARFSection *AddrOffsetSection;
uint32_t AddrOffsetSectionBase = 0;
bool isLittleEndian;
- bool isDWO;
- const DWARFUnitSectionBase &UnitSection;
+ bool IsDWO;
+ const DWARFUnitVector &UnitVector;
/// Start, length, and DWARF format of the unit's contribution to the string
/// offsets table (DWARF v5).
@@ -278,7 +218,7 @@ class DWARFUnit {
Optional<DWARFDebugRnglistTable> RngListTable;
mutable const DWARFAbbreviationDeclarationSet *Abbrevs;
- llvm::Optional<BaseAddress> BaseAddr;
+ llvm::Optional<SectionedAddress> BaseAddr;
/// The compile unit debug information entry items.
std::vector<DWARFDebugInfoEntry> DieArray;
@@ -308,28 +248,30 @@ protected:
/// length and form. The given offset is expected to be derived from the unit
/// DIE's DW_AT_str_offsets_base attribute.
Optional<StrOffsetsContributionDescriptor>
- determineStringOffsetsTableContribution(DWARFDataExtractor &DA,
- uint64_t Offset);
+ determineStringOffsetsTableContribution(DWARFDataExtractor &DA);
/// Find the unit's contribution to the string offsets table and determine its
/// length and form. The given offset is expected to be 0 in a dwo file or,
/// in a dwp file, the start of the unit's contribution to the string offsets
/// table section (as determined by the index table).
Optional<StrOffsetsContributionDescriptor>
- determineStringOffsetsTableContributionDWO(DWARFDataExtractor &DA,
- uint64_t Offset);
+ determineStringOffsetsTableContributionDWO(DWARFDataExtractor &DA);
public:
DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
- const DWARFUnitHeader &Header,
- const DWARFDebugAbbrev *DA, const DWARFSection *RS, StringRef SS,
- const DWARFSection &SOS, const DWARFSection *AOS,
+ const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
+ const DWARFSection *RS, const DWARFSection *LocSection,
+ StringRef SS, const DWARFSection &SOS, const DWARFSection *AOS,
const DWARFSection &LS, bool LE, bool IsDWO,
- const DWARFUnitSectionBase &UnitSection);
+ const DWARFUnitVector &UnitVector);
virtual ~DWARFUnit();
+ bool isDWOUnit() const { return IsDWO; }
DWARFContext& getContext() const { return Context; }
+ const DWARFSection &getInfoSection() const { return InfoSection; }
+ const DWARFSection *getLocSection() const { return LocSection; }
+ StringRef getLocSectionData() const { return LocSectionData; }
uint32_t getOffset() const { return Header.getOffset(); }
const dwarf::FormParams &getFormParams() const {
return Header.getFormParams();
@@ -342,6 +284,7 @@ public:
}
uint32_t getLength() const { return Header.getLength(); }
uint8_t getUnitType() const { return Header.getUnitType(); }
+ bool isTypeUnit() const { return Header.isTypeUnit(); }
uint32_t getNextUnitOffset() const { return Header.getNextUnitOffset(); }
const DWARFSection &getLineSection() const { return LineSection; }
StringRef getStringSection() const { return StringSection; }
@@ -362,8 +305,8 @@ public:
RangeSectionBase = Base;
}
- bool getAddrOffsetSectionItem(uint32_t Index, uint64_t &Result) const;
- bool getStringOffsetSectionItem(uint32_t Index, uint64_t &Result) const;
+ Optional<SectionedAddress> getAddrOffsetSectionItem(uint32_t Index) const;
+ Optional<uint64_t> getStringOffsetSectionItem(uint32_t Index) const;
DWARFDataExtractor getDebugInfoExtractor() const;
@@ -433,7 +376,7 @@ public:
llvm_unreachable("Invalid UnitType.");
}
- llvm::Optional<BaseAddress> getBaseAddress();
+ llvm::Optional<SectionedAddress> getBaseAddress();
DWARFDie getUnitDIE(bool ExtractUnitDIEOnly = true) {
extractDIEsIfNeeded(ExtractUnitDIEOnly);
@@ -467,7 +410,7 @@ public:
return None;
}
- void collectAddressRanges(DWARFAddressRangesVector &CURanges);
+ Expected<DWARFAddressRangesVector> collectAddressRanges();
/// Returns subprogram DIE with address range encompassing the provided
/// address. The pointer is alive as long as parsed compile unit DIEs are not
@@ -480,8 +423,8 @@ public:
void getInlinedChainForAddress(uint64_t Address,
SmallVectorImpl<DWARFDie> &InlinedChain);
- /// getUnitSection - Return the DWARFUnitSection containing this unit.
- const DWARFUnitSectionBase &getUnitSection() const { return UnitSection; }
+ /// Return the DWARFUnitVector containing this unit.
+ const DWARFUnitVector &getUnitVector() const { return UnitVector; }
/// Returns the number of DIEs in the unit. Parses the unit
/// if necessary.
@@ -541,6 +484,7 @@ public:
return die_iterator_range(DieArray.begin(), DieArray.end());
}
+ virtual void dump(raw_ostream &OS, DIDumpOptions DumpOpts) = 0;
private:
/// Size in bytes of the .debug_info data associated with this compile unit.
size_t getDebugInfoSize() const {
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
index 49ed4bb222f3..16be5f9401c0 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
@@ -74,6 +74,7 @@ private:
int InfoColumn = -1;
std::unique_ptr<DWARFSectionKind[]> ColumnKinds;
std::unique_ptr<Entry[]> Rows;
+ mutable std::vector<Entry *> OffsetLookup;
static StringRef getColumnHeader(DWARFSectionKind DS);
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFVerifier.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
index a829510a219d..e47fbea5646e 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
@@ -14,6 +14,7 @@
#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include <cstdint>
#include <map>
@@ -96,10 +97,14 @@ private:
/// lies between to valid DIEs.
std::map<uint64_t, std::set<uint32_t>> ReferenceToDIEOffsets;
uint32_t NumDebugLineErrors = 0;
+ // Used to relax some checks that do not currently work portably
+ bool IsObjectFile;
+ bool IsMachOObject;
raw_ostream &error() const;
raw_ostream &warn() const;
raw_ostream &note() const;
+ raw_ostream &dump(const DWARFDie &Die, unsigned indent = 0) const;
/// Verifies the abbreviations section.
///
@@ -113,20 +118,20 @@ private:
/// \returns The number of errors that occurred during verification.
unsigned verifyAbbrevSection(const DWARFDebugAbbrev *Abbrev);
- /// Verifies the header of a unit in the .debug_info section.
+ /// Verifies the header of a unit in a .debug_info or .debug_types section.
///
/// This function currently checks for:
/// - Unit is in 32-bit DWARF format. The function can be modified to
/// support 64-bit format.
/// - The DWARF version is valid
/// - The unit type is valid (if unit is in version >=5)
- /// - The unit doesn't extend beyond .debug_info section
+ /// - The unit doesn't extend beyond the containing section
/// - The address size is valid
/// - The offset in the .debug_abbrev section is valid
///
- /// \param DebugInfoData The .debug_info section data
+ /// \param DebugInfoData The section data
/// \param Offset A reference to the offset start of the unit. The offset will
- /// be updated to point to the next unit in .debug_info
+ /// be updated to point to the next unit in the section
/// \param UnitIndex The index of the unit to be verified
/// \param UnitType A reference to the type of the unit
/// \param isUnitDWARF64 A reference to a flag that shows whether the unit is
@@ -137,7 +142,7 @@ private:
uint32_t *Offset, unsigned UnitIndex, uint8_t &UnitType,
bool &isUnitDWARF64);
- /// Verifies the header of a unit in the .debug_info section.
+ /// Verifies the header of a unit in a .debug_info or .debug_types section.
///
/// This function currently verifies:
/// - The debug info attributes.
@@ -146,13 +151,29 @@ private:
/// - That the root DIE is a unit DIE.
/// - If a unit type is provided, that the unit DIE matches the unit type.
/// - The DIE ranges.
+ /// - That call site entries are only nested within subprograms with a
+ /// DW_AT_call attribute.
///
- /// \param Unit The DWARF Unit to verifiy.
- /// \param UnitType An optional unit type which will be used to verify the
- /// type of the unit DIE.
+ /// \param Unit The DWARF Unit to verify.
///
- /// \returns true if the content is verified successfully, false otherwise.
- bool verifyUnitContents(DWARFUnit &Unit, uint8_t UnitType = 0);
+ /// \returns The number of errors that occurred during verification.
+ unsigned verifyUnitContents(DWARFUnit &Unit);
+
+ /// Verifies the unit headers and contents in a .debug_info or .debug_types
+ /// section.
+ ///
+ /// \param S The DWARF Section to verify.
+ /// \param SectionKind The object-file section kind that S comes from.
+ ///
+ /// \returns The number of errors that occurred during verification.
+ unsigned verifyUnitSection(const DWARFSection &S,
+ DWARFSectionKind SectionKind);
+
+ /// Verifies that a call site entry is nested within a subprogram with a
+ /// DW_AT_call attribute.
+ ///
+ /// \returns Number of errors that occurred during verification.
+ unsigned verifyDebugInfoCallSite(const DWARFDie &Die);
/// Verify that all Die ranges are valid.
///
@@ -172,7 +193,7 @@ private:
/// \param AttrValue The DWARF attribute value to check
///
/// \returns NumErrors The number of errors occurred during verification of
- /// attributes' values in a .debug_info section unit
+ /// attributes' values in a unit
unsigned verifyDebugInfoAttribute(const DWARFDie &Die,
DWARFAttribute &AttrValue);
@@ -180,14 +201,14 @@ private:
///
/// This function currently checks for:
/// - All DW_FORM_ref values that are CU relative have valid CU offsets
- /// - All DW_FORM_ref_addr values have valid .debug_info offsets
+ /// - All DW_FORM_ref_addr values have valid section offsets
/// - All DW_FORM_strp values have valid .debug_str offsets
///
/// \param Die The DWARF DIE that owns the attribute value
/// \param AttrValue The DWARF attribute value to check
///
/// \returns NumErrors The number of errors occurred during verification of
- /// attributes' forms in a .debug_info section unit
+ /// attributes' forms in a unit
unsigned verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies the all valid references that were found when iterating through
@@ -199,7 +220,7 @@ private:
/// CU relative and absolute references.
///
/// \returns NumErrors The number of errors occurred during verification of
- /// references for the .debug_info section
+ /// references for the .debug_info and .debug_types sections
unsigned verifyDebugInfoReferences();
/// Verify the DW_AT_stmt_list encoding and value and ensure that no
@@ -268,8 +289,8 @@ private:
public:
DWARFVerifier(raw_ostream &S, DWARFContext &D,
- DIDumpOptions DumpOpts = DIDumpOptions::getForSingleDIE())
- : OS(S), DCtx(D), DumpOpts(std::move(DumpOpts)) {}
+ DIDumpOptions DumpOpts = DIDumpOptions::getForSingleDIE());
+
/// Verify the information in any of the following sections, if available:
/// .debug_abbrev, debug_abbrev.dwo
///
@@ -280,12 +301,12 @@ public:
/// false otherwise.
bool handleDebugAbbrev();
- /// Verify the information in the .debug_info section.
+ /// Verify the information in the .debug_info and .debug_types sections.
///
- /// Any errors are reported to the stream that was this object was
+ /// Any errors are reported to the stream that this object was
/// constructed with.
///
- /// \returns true if the .debug_info verifies successfully, false otherwise.
+ /// \returns true if all sections verify successfully, false otherwise.
bool handleDebugInfo();
/// Verify the information in the .debug_line section.
diff --git a/contrib/llvm/include/llvm/DebugInfo/MSF/MSFError.h b/contrib/llvm/include/llvm/DebugInfo/MSF/MSFError.h
index e66aeca3cd45..5c043a7837b3 100644
--- a/contrib/llvm/include/llvm/DebugInfo/MSF/MSFError.h
+++ b/contrib/llvm/include/llvm/DebugInfo/MSF/MSFError.h
@@ -24,22 +24,28 @@ enum class msf_error_code {
invalid_format,
block_in_use
};
+} // namespace msf
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::msf::msf_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace msf {
+const std::error_category &MSFErrCategory();
+
+inline std::error_code make_error_code(msf_error_code E) {
+ return std::error_code(static_cast<int>(E), MSFErrCategory());
+}
/// Base class for errors originating when parsing raw PDB files
-class MSFError : public ErrorInfo<MSFError> {
+class MSFError : public ErrorInfo<MSFError, StringError> {
public:
+ using ErrorInfo<MSFError, StringError>::ErrorInfo; // inherit constructors
+ MSFError(const Twine &S) : ErrorInfo(S, msf_error_code::unspecified) {}
static char ID;
- MSFError(msf_error_code C);
- MSFError(const std::string &Context);
- MSFError(msf_error_code C, const std::string &Context);
-
- void log(raw_ostream &OS) const override;
- const std::string &getErrorMessage() const;
- std::error_code convertToErrorCode() const override;
-
-private:
- std::string ErrMsg;
- msf_error_code Code;
};
} // namespace msf
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h b/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
index 9713dce362d2..ac7f19637ab1 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
@@ -43,11 +43,6 @@ public:
void reset() override { Enumerator->reset(); }
- ConcreteSymbolEnumerator<ChildType> *clone() const override {
- std::unique_ptr<IPDBEnumSymbols> WrappedClone(Enumerator->clone());
- return new ConcreteSymbolEnumerator<ChildType>(std::move(WrappedClone));
- }
-
private:
std::unique_ptr<IPDBEnumSymbols> Enumerator;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
index 930bea6060b2..881d7329ab66 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
@@ -24,7 +24,6 @@ public:
llvm::Optional<RecordType> getItemAtIndex(uint32_t Index) const override;
bool getNext(RecordType &Record) override;
void reset() override;
- DIADataStream *clone() const override;
private:
CComPtr<IDiaEnumDebugStreamData> StreamData;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
index ffae6645e94b..1f129052d034 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
@@ -27,7 +27,6 @@ public:
ChildTypePtr getChildAtIndex(uint32_t Index) const override;
ChildTypePtr getNext() override;
void reset() override;
- DIAEnumDebugStreams *clone() const override;
private:
CComPtr<IDiaEnumDebugStreams> Enumerator;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h
new file mode 100644
index 000000000000..f3b02f07e648
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumFrameData.h
@@ -0,0 +1,36 @@
+//==- DIAEnumFrameData.h --------------------------------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
+
+namespace llvm {
+namespace pdb {
+
+class DIAEnumFrameData : public IPDBEnumChildren<IPDBFrameData> {
+public:
+ explicit DIAEnumFrameData(CComPtr<IDiaEnumFrameData> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+
+private:
+ CComPtr<IDiaEnumFrameData> Enumerator;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
index 39490a4b2209..4669a8d31196 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumInjectedSources.h
@@ -16,22 +16,18 @@
namespace llvm {
namespace pdb {
-class DIASession;
class DIAEnumInjectedSources : public IPDBEnumChildren<IPDBInjectedSource> {
public:
explicit DIAEnumInjectedSources(
- const DIASession &PDBSession,
CComPtr<IDiaEnumInjectedSources> DiaEnumerator);
uint32_t getChildCount() const override;
ChildTypePtr getChildAtIndex(uint32_t Index) const override;
ChildTypePtr getNext() override;
void reset() override;
- DIAEnumInjectedSources *clone() const override;
private:
- const DIASession &Session;
CComPtr<IDiaEnumInjectedSources> Enumerator;
};
} // namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
index 08f0de124ede..f1cb6268a26d 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
@@ -26,7 +26,6 @@ public:
ChildTypePtr getChildAtIndex(uint32_t Index) const override;
ChildTypePtr getNext() override;
void reset() override;
- DIAEnumLineNumbers *clone() const override;
private:
CComPtr<IDiaEnumLineNumbers> Enumerator;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
index 52c9563b5d5f..ac2ae317d263 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSectionContribs.h
@@ -28,7 +28,6 @@ public:
ChildTypePtr getChildAtIndex(uint32_t Index) const override;
ChildTypePtr getNext() override;
void reset() override;
- DIAEnumSectionContribs *clone() const override;
private:
const DIASession &Session;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
index e69d18f5ba37..dac3df06a178 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
@@ -27,7 +27,6 @@ public:
ChildTypePtr getChildAtIndex(uint32_t Index) const override;
ChildTypePtr getNext() override;
void reset() override;
- DIAEnumSourceFiles *clone() const override;
private:
const DIASession &Session;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
index f779cd1f4be3..9689859ae0f8 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
@@ -27,7 +27,6 @@ public:
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
std::unique_ptr<PDBSymbol> getNext() override;
void reset() override;
- DIAEnumSymbols *clone() const override;
private:
const DIASession &Session;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
index 926fcfe69648..f4f856ebb6fd 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumTables.h
@@ -26,7 +26,6 @@ public:
std::unique_ptr<IPDBTable> getChildAtIndex(uint32_t Index) const override;
std::unique_ptr<IPDBTable> getNext() override;
void reset() override;
- DIAEnumTables *clone() const override;
private:
CComPtr<IDiaEnumTables> Enumerator;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAError.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAError.h
index 35a39a0df5ca..2b33a65a0a14 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAError.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAError.h
@@ -23,23 +23,29 @@ enum class dia_error_code {
already_loaded,
debug_info_mismatch,
};
+} // namespace pdb
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::pdb::dia_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace pdb {
+const std::error_category &DIAErrCategory();
+
+inline std::error_code make_error_code(dia_error_code E) {
+ return std::error_code(static_cast<int>(E), DIAErrCategory());
+}
/// Base class for errors originating in DIA SDK, e.g. COM calls
-class DIAError : public ErrorInfo<DIAError> {
+class DIAError : public ErrorInfo<DIAError, StringError> {
public:
+ using ErrorInfo<DIAError, StringError>::ErrorInfo;
+ DIAError(const Twine &S) : ErrorInfo(S, dia_error_code::unspecified) {}
static char ID;
- DIAError(dia_error_code C);
- DIAError(StringRef Context);
- DIAError(dia_error_code C, StringRef Context);
-
- void log(raw_ostream &OS) const override;
- StringRef getErrorMessage() const;
- std::error_code convertToErrorCode() const override;
-
-private:
- std::string ErrMsg;
- dia_error_code Code;
};
-}
-}
+} // namespace pdb
+} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h
new file mode 100644
index 000000000000..0ce6cfc93030
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAFrameData.h
@@ -0,0 +1,39 @@
+//===- DIAFrameData.h - DIA Impl. of IPDBFrameData ---------------- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
+
+namespace llvm {
+namespace pdb {
+
+class DIASession;
+
+class DIAFrameData : public IPDBFrameData {
+public:
+ explicit DIAFrameData(CComPtr<IDiaFrameData> DiaFrameData);
+
+ uint32_t getAddressOffset() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getLengthBlock() const override;
+ std::string getProgram() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint64_t getVirtualAddress() const override;
+
+private:
+ CComPtr<IDiaFrameData> FrameData;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
index dfb35647055a..5d4f855c63ca 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
@@ -20,7 +20,8 @@ class DIARawSymbol : public IPDBRawSymbol {
public:
DIARawSymbol(const DIASession &PDBSession, CComPtr<IDiaSymbol> DiaSymbol);
- void dump(raw_ostream &OS, int Indent) const override;
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
CComPtr<IDiaSymbol> getDiaSymbol() const { return Symbol; }
@@ -63,25 +64,25 @@ public:
uint32_t getAddressOffset() const override;
uint32_t getAddressSection() const override;
uint32_t getAge() const override;
- uint32_t getArrayIndexTypeId() const override;
+ SymIndexId getArrayIndexTypeId() const override;
uint32_t getBaseDataOffset() const override;
uint32_t getBaseDataSlot() const override;
- uint32_t getBaseSymbolId() const override;
+ SymIndexId getBaseSymbolId() const override;
PDB_BuiltinType getBuiltinType() const override;
uint32_t getBitPosition() const override;
PDB_CallingConv getCallingConvention() const override;
- uint32_t getClassParentId() const override;
+ SymIndexId getClassParentId() const override;
std::string getCompilerName() const override;
uint32_t getCount() const override;
uint32_t getCountLiveRanges() const override;
PDB_Lang getLanguage() const override;
- uint32_t getLexicalParentId() const override;
+ SymIndexId getLexicalParentId() const override;
std::string getLibraryName() const override;
uint32_t getLiveRangeStartAddressOffset() const override;
uint32_t getLiveRangeStartAddressSection() const override;
uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
codeview::RegisterId getLocalBasePointerRegisterId() const override;
- uint32_t getLowerBoundId() const override;
+ SymIndexId getLowerBoundId() const override;
uint32_t getMemorySpaceKind() const override;
std::string getName() const override;
uint32_t getNumberOfAcceleratorPointerTags() const override;
@@ -91,7 +92,7 @@ public:
uint32_t getNumberOfRows() const override;
std::string getObjectFileName() const override;
uint32_t getOemId() const override;
- uint32_t getOemSymbolId() const override;
+ SymIndexId getOemSymbolId() const override;
uint32_t getOffsetInUdt() const override;
PDB_Cpu getPlatform() const override;
uint32_t getRank() const override;
@@ -105,9 +106,9 @@ public:
std::string getSourceFileName() const override;
std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
uint32_t getStride() const override;
- uint32_t getSubTypeId() const override;
+ SymIndexId getSubTypeId() const override;
std::string getSymbolsFileName() const override;
- uint32_t getSymIndexId() const override;
+ SymIndexId getSymIndexId() const override;
uint32_t getTargetOffset() const override;
uint32_t getTargetRelativeVirtualAddress() const override;
uint64_t getTargetVirtualAddress() const override;
@@ -115,16 +116,16 @@ public:
uint32_t getTextureSlot() const override;
uint32_t getTimeStamp() const override;
uint32_t getToken() const override;
- uint32_t getTypeId() const override;
+ SymIndexId getTypeId() const override;
uint32_t getUavSlot() const override;
std::string getUndecoratedName() const override;
std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
- uint32_t getUnmodifiedTypeId() const override;
- uint32_t getUpperBoundId() const override;
+ SymIndexId getUnmodifiedTypeId() const override;
+ SymIndexId getUpperBoundId() const override;
Variant getValue() const override;
uint32_t getVirtualBaseDispIndex() const override;
uint32_t getVirtualBaseOffset() const override;
- uint32_t getVirtualTableShapeId() const override;
+ SymIndexId getVirtualTableShapeId() const override;
std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h
index a63659439389..592e061a8d83 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h
@@ -32,7 +32,7 @@ public:
uint64_t getLoadAddress() const override;
bool setLoadAddress(uint64_t Address) override;
std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
- std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const override;
+ std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const override;
bool addressForVA(uint64_t VA, uint32_t &Section,
uint32_t &Offset) const override;
@@ -85,6 +85,7 @@ public:
std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;
+ std::unique_ptr<IPDBEnumFrameData> getFrameData() const override;
private:
CComPtr<IDiaSession> Session;
};
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/GenericError.h b/contrib/llvm/include/llvm/DebugInfo/PDB/GenericError.h
index 03205a986f1a..997f13f5f30e 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/GenericError.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/GenericError.h
@@ -16,29 +16,37 @@
namespace llvm {
namespace pdb {
-enum class generic_error_code {
- invalid_path = 1,
+enum class pdb_error_code {
+ invalid_utf8_path = 1,
dia_sdk_not_present,
- type_server_not_found,
+ dia_failed_loading,
+ signature_out_of_date,
+ external_cmdline_ref,
unspecified,
};
+} // namespace pdb
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::pdb::pdb_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace pdb {
+const std::error_category &PDBErrCategory();
+
+inline std::error_code make_error_code(pdb_error_code E) {
+ return std::error_code(static_cast<int>(E), PDBErrCategory());
+}
/// Base class for errors originating when parsing raw PDB files
-class GenericError : public ErrorInfo<GenericError> {
+class PDBError : public ErrorInfo<PDBError, StringError> {
public:
+ using ErrorInfo<PDBError, StringError>::ErrorInfo; // inherit constructors
+ PDBError(const Twine &S) : ErrorInfo(S, pdb_error_code::unspecified) {}
static char ID;
- GenericError(generic_error_code C);
- GenericError(StringRef Context);
- GenericError(generic_error_code C, StringRef Context);
-
- void log(raw_ostream &OS) const override;
- StringRef getErrorMessage() const;
- std::error_code convertToErrorCode() const override;
-
-private:
- std::string ErrMsg;
- generic_error_code Code;
};
-}
-}
+} // namespace pdb
+} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h
index 67b5a06d7c59..0d7a286a11a6 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h
@@ -32,7 +32,6 @@ public:
virtual Optional<RecordType> getItemAtIndex(uint32_t Index) const = 0;
virtual bool getNext(RecordType &Record) = 0;
virtual void reset() = 0;
- virtual IPDBDataStream *clone() const = 0;
};
} // end namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
index b6b7d95f6282..7017f2600e9b 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
@@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
#define LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
+#include <cassert>
#include <cstdint>
#include <memory>
@@ -27,7 +28,19 @@ public:
virtual ChildTypePtr getChildAtIndex(uint32_t Index) const = 0;
virtual ChildTypePtr getNext() = 0;
virtual void reset() = 0;
- virtual MyType *clone() const = 0;
+};
+
+template <typename ChildType>
+class NullEnumerator : public IPDBEnumChildren<ChildType> {
+ virtual uint32_t getChildCount() const override { return 0; }
+ virtual std::unique_ptr<ChildType>
+ getChildAtIndex(uint32_t Index) const override {
+ return nullptr;
+ }
+ virtual std::unique_ptr<ChildType> getNext() override {
+ return nullptr;
+ }
+ virtual void reset() override {}
};
} // end namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBFrameData.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBFrameData.h
new file mode 100644
index 000000000000..74679215b880
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBFrameData.h
@@ -0,0 +1,36 @@
+//===- IPDBFrameData.h - base interface for frame data ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBFRAMEDATA_H
+#define LLVM_DEBUGINFO_PDB_IPDBFRAMEDATA_H
+
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+namespace pdb {
+
+/// IPDBFrameData defines an interface used to represent a frame data of some
+/// code block.
+class IPDBFrameData {
+public:
+ virtual ~IPDBFrameData();
+
+ virtual uint32_t getAddressOffset() const = 0;
+ virtual uint32_t getAddressSection() const = 0;
+ virtual uint32_t getLengthBlock() const = 0;
+ virtual std::string getProgram() const = 0;
+ virtual uint32_t getRelativeVirtualAddress() const = 0;
+ virtual uint64_t getVirtualAddress() const = 0;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
index bcb2eaa35630..7c818d7cadeb 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
@@ -11,6 +11,7 @@
#define LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
#include "PDBTypes.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
@@ -21,9 +22,26 @@ class raw_ostream;
namespace pdb {
+class IPDBSession;
class PDBSymbolTypeVTable;
class PDBSymbolTypeVTableShape;
+enum class PdbSymbolIdField : uint32_t {
+ None = 0,
+ SymIndexId = 1 << 0,
+ LexicalParent = 1 << 1,
+ ClassParent = 1 << 2,
+ Type = 1 << 3,
+ UnmodifiedType = 1 << 4,
+ All = 0xFFFFFFFF,
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ All)
+};
+
+void dumpSymbolIdField(raw_ostream &OS, StringRef Name, SymIndexId Value,
+ int Indent, const IPDBSession &Session,
+ PdbSymbolIdField FieldId, PdbSymbolIdField ShowFlags,
+ PdbSymbolIdField RecurseFlags);
+
/// IPDBRawSymbol defines an interface used to represent an arbitrary symbol.
/// It exposes a monolithic interface consisting of accessors for the union of
/// all properties that are valid for any symbol type. This interface is then
@@ -33,7 +51,8 @@ class IPDBRawSymbol {
public:
virtual ~IPDBRawSymbol();
- virtual void dump(raw_ostream &OS, int Indent) const = 0;
+ virtual void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const = 0;
virtual std::unique_ptr<IPDBEnumSymbols>
findChildren(PDB_SymType Type) const = 0;
@@ -74,26 +93,26 @@ public:
virtual uint32_t getAddressOffset() const = 0;
virtual uint32_t getAddressSection() const = 0;
virtual uint32_t getAge() const = 0;
- virtual uint32_t getArrayIndexTypeId() const = 0;
+ virtual SymIndexId getArrayIndexTypeId() const = 0;
virtual uint32_t getBaseDataOffset() const = 0;
virtual uint32_t getBaseDataSlot() const = 0;
- virtual uint32_t getBaseSymbolId() const = 0;
+ virtual SymIndexId getBaseSymbolId() const = 0;
virtual PDB_BuiltinType getBuiltinType() const = 0;
virtual uint32_t getBitPosition() const = 0;
virtual PDB_CallingConv getCallingConvention() const = 0;
- virtual uint32_t getClassParentId() const = 0;
+ virtual SymIndexId getClassParentId() const = 0;
virtual std::string getCompilerName() const = 0;
virtual uint32_t getCount() const = 0;
virtual uint32_t getCountLiveRanges() const = 0;
virtual void getFrontEndVersion(VersionInfo &Version) const = 0;
virtual PDB_Lang getLanguage() const = 0;
- virtual uint32_t getLexicalParentId() const = 0;
+ virtual SymIndexId getLexicalParentId() const = 0;
virtual std::string getLibraryName() const = 0;
virtual uint32_t getLiveRangeStartAddressOffset() const = 0;
virtual uint32_t getLiveRangeStartAddressSection() const = 0;
virtual uint32_t getLiveRangeStartRelativeVirtualAddress() const = 0;
virtual codeview::RegisterId getLocalBasePointerRegisterId() const = 0;
- virtual uint32_t getLowerBoundId() const = 0;
+ virtual SymIndexId getLowerBoundId() const = 0;
virtual uint32_t getMemorySpaceKind() const = 0;
virtual std::string getName() const = 0;
virtual uint32_t getNumberOfAcceleratorPointerTags() const = 0;
@@ -103,7 +122,7 @@ public:
virtual uint32_t getNumberOfRows() const = 0;
virtual std::string getObjectFileName() const = 0;
virtual uint32_t getOemId() const = 0;
- virtual uint32_t getOemSymbolId() const = 0;
+ virtual SymIndexId getOemSymbolId() const = 0;
virtual uint32_t getOffsetInUdt() const = 0;
virtual PDB_Cpu getPlatform() const = 0;
virtual uint32_t getRank() const = 0;
@@ -118,9 +137,9 @@ public:
virtual std::unique_ptr<IPDBLineNumber>
getSrcLineOnTypeDefn() const = 0;
virtual uint32_t getStride() const = 0;
- virtual uint32_t getSubTypeId() const = 0;
+ virtual SymIndexId getSubTypeId() const = 0;
virtual std::string getSymbolsFileName() const = 0;
- virtual uint32_t getSymIndexId() const = 0;
+ virtual SymIndexId getSymIndexId() const = 0;
virtual uint32_t getTargetOffset() const = 0;
virtual uint32_t getTargetRelativeVirtualAddress() const = 0;
virtual uint64_t getTargetVirtualAddress() const = 0;
@@ -128,18 +147,18 @@ public:
virtual uint32_t getTextureSlot() const = 0;
virtual uint32_t getTimeStamp() const = 0;
virtual uint32_t getToken() const = 0;
- virtual uint32_t getTypeId() const = 0;
+ virtual SymIndexId getTypeId() const = 0;
virtual uint32_t getUavSlot() const = 0;
virtual std::string getUndecoratedName() const = 0;
virtual std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const = 0;
- virtual uint32_t getUnmodifiedTypeId() const = 0;
- virtual uint32_t getUpperBoundId() const = 0;
+ virtual SymIndexId getUnmodifiedTypeId() const = 0;
+ virtual SymIndexId getUpperBoundId() const = 0;
virtual Variant getValue() const = 0;
virtual uint32_t getVirtualBaseDispIndex() const = 0;
virtual uint32_t getVirtualBaseOffset() const = 0;
virtual std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const = 0;
- virtual uint32_t getVirtualTableShapeId() const = 0;
+ virtual SymIndexId getVirtualTableShapeId() const = 0;
virtual PDB_DataKind getDataKind() const = 0;
virtual PDB_SymType getSymTag() const = 0;
virtual codeview::GUID getGuid() const = 0;
@@ -237,6 +256,8 @@ public:
virtual std::string getUnused() const = 0;
};
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
} // namespace pdb
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h
index 88ec517bc4a5..88fd02c0a345 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h
@@ -30,7 +30,8 @@ public:
virtual uint64_t getLoadAddress() const = 0;
virtual bool setLoadAddress(uint64_t Address) = 0;
virtual std::unique_ptr<PDBSymbolExe> getGlobalScope() = 0;
- virtual std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const = 0;
+ virtual std::unique_ptr<PDBSymbol>
+ getSymbolById(SymIndexId SymbolId) const = 0;
virtual bool addressForVA(uint64_t VA, uint32_t &Section,
uint32_t &Offset) const = 0;
@@ -38,7 +39,7 @@ public:
uint32_t &Offset) const = 0;
template <typename T>
- std::unique_ptr<T> getConcreteSymbolById(uint32_t SymbolId) const {
+ std::unique_ptr<T> getConcreteSymbolById(SymIndexId SymbolId) const {
return unique_dyn_cast_or_null<T>(getSymbolById(SymbolId));
}
@@ -90,6 +91,9 @@ public:
virtual std::unique_ptr<IPDBEnumSectionContribs>
getSectionContribs() const = 0;
+
+ virtual std::unique_ptr<IPDBEnumFrameData>
+ getFrameData() const = 0;
};
} // namespace pdb
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
index ce4d07917755..ac7f741afefa 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -51,6 +51,7 @@ public:
void setObjFileName(StringRef Name);
void setFirstSectionContrib(const SectionContrib &SC);
void addSymbol(codeview::CVSymbol Symbol);
+ void addSymbolsInBulk(ArrayRef<uint8_t> BulkSymbols);
void
addDebugSubsection(std::shared_ptr<codeview::DebugSubsection> Subsection);
@@ -91,7 +92,7 @@ private:
std::string ModuleName;
std::string ObjFileName;
std::vector<std::string> SourceFiles;
- std::vector<codeview::CVSymbol> Symbols;
+ std::vector<ArrayRef<uint8_t>> Symbols;
std::vector<std::unique_ptr<codeview::DebugSubsectionRecordBuilder>>
C13Builders;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index 280615bdb507..a3ca607efbef 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -78,7 +78,7 @@ public:
const DbiModuleList &modules() const;
- FixedStreamArray<object::coff_section> getSectionHeaders();
+ FixedStreamArray<object::coff_section> getSectionHeaders() const;
FixedStreamArray<object::FpoData> getFpoRecords();
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index 51befcdac775..b538de576677 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -15,6 +15,7 @@
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Support/Error.h"
+#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
@@ -24,11 +25,15 @@
#include "llvm/Support/Endian.h"
namespace llvm {
+namespace codeview {
+struct FrameData;
+}
namespace msf {
class MSFBuilder;
}
namespace object {
struct coff_section;
+struct FpoData;
}
namespace pdb {
class DbiStream;
@@ -65,6 +70,8 @@ public:
void setGlobalsStreamIndex(uint32_t Index);
void setPublicsStreamIndex(uint32_t Index);
void setSymbolRecordStreamIndex(uint32_t Index);
+ void addNewFpoData(const codeview::FrameData &FD);
+ void addOldFpoData(const object::FpoData &Fpo);
Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
Error addModuleSourceFile(DbiModuleDescriptorBuilder &Module, StringRef File);
@@ -84,7 +91,8 @@ public:
private:
struct DebugStream {
- ArrayRef<uint8_t> Data;
+ std::function<Error(BinaryStreamWriter &)> WriteFn;
+ uint32_t Size = 0;
uint16_t StreamNumber = kInvalidStreamIndex;
};
@@ -117,6 +125,9 @@ private:
std::vector<std::unique_ptr<DbiModuleDescriptorBuilder>> ModiList;
+ Optional<codeview::DebugFrameDataSubsection> NewFpoData;
+ std::vector<object::FpoData> OldFpoData;
+
StringMap<uint32_t> SourceFileNames;
PDBStringTableBuilder ECNamesBuilder;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h
index 1a4f89d607df..4c39ca762b5b 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h
@@ -61,7 +61,6 @@ public:
void addGlobalSymbol(const codeview::ProcRefSym &Sym);
void addGlobalSymbol(const codeview::DataSym &Sym);
void addGlobalSymbol(const codeview::ConstantSym &Sym);
- void addGlobalSymbol(const codeview::UDTSym &Sym);
void addGlobalSymbol(const codeview::CVSymbol &Sym);
private:
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h
index dd04b5c5681d..7f84564ee988 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h
@@ -10,18 +10,20 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
#define LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
+#include "llvm/ADT/iterator.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Error.h"
-#include "llvm/ADT/iterator.h"
namespace llvm {
namespace pdb {
class DbiStream;
class PDBFile;
+class SymbolStream;
/// Iterator over hash records producing symbol record offsets. Abstracts away
/// the fact that symbol record offsets on disk are off-by-one.
@@ -50,8 +52,9 @@ class GSIHashTable {
public:
const GSIHashHeader *HashHdr;
FixedStreamArray<PSHashRecord> HashRecords;
- ArrayRef<uint8_t> HashBitmap;
+ FixedStreamArray<support::ulittle32_t> HashBitmap;
FixedStreamArray<support::ulittle32_t> HashBuckets;
+ std::array<int32_t, IPHR_HASH + 1> BucketMap;
Error read(BinaryStreamReader &Reader);
@@ -72,6 +75,9 @@ public:
const GSIHashTable &getGlobalsTable() const { return GlobalsTable; }
Error reload();
+ std::vector<std::pair<uint32_t, codeview::CVSymbol>>
+ findRecordsByName(StringRef Name, const SymbolStream &Symbols) const;
+
private:
GSIHashTable GlobalsTable;
std::unique_ptr<msf::MappedBlockStream> Stream;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h
index 419e8ada06f7..101127a355f5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h
@@ -35,11 +35,18 @@ public:
InfoStreamBuilder &operator=(const InfoStreamBuilder &) = delete;
void setVersion(PdbRaw_ImplVer V);
+ void addFeature(PdbRaw_FeatureSig Sig);
+
+ // If this is true, the PDB contents are hashed and this hash is used as
+ // PDB GUID and as Signature. The age is always 1.
+ void setHashPDBContentsToGUID(bool B);
+
+ // These only have an effect if hashPDBContentsToGUID() is false.
void setSignature(uint32_t S);
void setAge(uint32_t A);
void setGuid(codeview::GUID G);
- void addFeature(PdbRaw_FeatureSig Sig);
+ bool hashPDBContentsToGUID() const { return HashPDBContentsToGUID; }
uint32_t getAge() const { return Age; }
codeview::GUID getGuid() const { return Guid; }
Optional<uint32_t> getSignature() const { return Signature; }
@@ -60,6 +67,8 @@ private:
Optional<uint32_t> Signature;
codeview::GUID Guid;
+ bool HashPDBContentsToGUID = false;
+
NamedStreamMap &NamedStreams;
};
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
index efc25e0559b9..8d590df288f3 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
@@ -15,6 +15,7 @@
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@@ -43,6 +44,8 @@ public:
symbols(bool *HadError) const;
const codeview::CVSymbolArray &getSymbolArray() const { return SymbolArray; }
+ const codeview::CVSymbolArray
+ getSymbolArrayForScope(uint32_t ScopeBegin) const;
BinarySubstreamRef getSymbolsSubstream() const;
BinarySubstreamRef getC11LinesSubstream() const;
@@ -51,6 +54,8 @@ public:
ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = delete;
+ codeview::CVSymbol readSymbolAtOffset(uint32_t Offset) const;
+
iterator_range<DebugSubsectionIterator> subsections() const;
codeview::DebugSubsectionArray getSubsectionsArray() const {
return Subsections;
@@ -64,7 +69,7 @@ public:
findChecksumsSubsection() const;
private:
- const DbiModuleDescriptor &Mod;
+ DbiModuleDescriptor Mod;
uint32_t Signature;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index bd5c09e5ff76..3cd465503044 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -21,11 +21,12 @@ public:
NativeCompilandSymbol(NativeSession &Session, SymIndexId SymbolId,
DbiModuleDescriptor MI);
- std::unique_ptr<NativeRawSymbol> clone() const override;
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
PDB_SymType getSymTag() const override;
bool isEditAndContinueEnabled() const override;
- uint32_t getLexicalParentId() const override;
+ SymIndexId getLexicalParentId() const override;
std::string getLibraryName() const override;
std::string getName() const override;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h
new file mode 100644
index 000000000000..4442a1ec41fb
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumGlobals.h
@@ -0,0 +1,43 @@
+//==- NativeEnumGlobals.h - Native Global Enumerator impl --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMGLOBALS_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMGLOBALS_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBSymbol.h"
+
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeEnumGlobals : public IPDBEnumChildren<PDBSymbol> {
+public:
+ NativeEnumGlobals(NativeSession &Session,
+ std::vector<codeview::SymbolKind> Kinds);
+
+ uint32_t getChildCount() const override;
+ std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+ std::unique_ptr<PDBSymbol> getNext() override;
+ void reset() override;
+
+private:
+ std::vector<uint32_t> MatchOffsets;
+ uint32_t Index;
+ NativeSession &Session;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
index 6aa1460dbb4e..c268641a1008 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
@@ -11,28 +11,23 @@
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
-#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
namespace llvm {
namespace pdb {
-class DbiModuleList;
class NativeSession;
class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
public:
- NativeEnumModules(NativeSession &Session, const DbiModuleList &Modules,
- uint32_t Index = 0);
+ NativeEnumModules(NativeSession &Session, uint32_t Index = 0);
uint32_t getChildCount() const override;
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
std::unique_ptr<PDBSymbol> getNext() override;
void reset() override;
- NativeEnumModules *clone() const override;
private:
NativeSession &Session;
- const DbiModuleList &Modules;
uint32_t Index;
};
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h
deleted file mode 100644
index 41b7b78b8d80..000000000000
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumSymbol.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===- NativeEnumSymbol.h - info about enum type ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
-#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
-
-#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
-#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
-#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
-
-namespace llvm {
-namespace pdb {
-
-class NativeEnumSymbol : public NativeRawSymbol,
- public codeview::TypeVisitorCallbacks {
-public:
- NativeEnumSymbol(NativeSession &Session, SymIndexId Id,
- const codeview::CVType &CV);
- ~NativeEnumSymbol() override;
-
- std::unique_ptr<NativeRawSymbol> clone() const override;
-
- std::unique_ptr<IPDBEnumSymbols>
- findChildren(PDB_SymType Type) const override;
-
- Error visitKnownRecord(codeview::CVType &CVR,
- codeview::EnumRecord &Record) override;
- Error visitKnownMember(codeview::CVMemberRecord &CVM,
- codeview::EnumeratorRecord &Record) override;
-
- PDB_SymType getSymTag() const override;
- uint32_t getClassParentId() const override;
- uint32_t getUnmodifiedTypeId() const override;
- bool hasConstructor() const override;
- bool hasAssignmentOperator() const override;
- bool hasCastOperator() const override;
- uint64_t getLength() const override;
- std::string getName() const override;
- bool isNested() const override;
- bool hasOverloadedOperator() const override;
- bool isPacked() const override;
- bool isScoped() const override;
- uint32_t getTypeId() const override;
-
-protected:
- codeview::CVType CV;
- codeview::EnumRecord Record;
-};
-
-} // namespace pdb
-} // namespace llvm
-
-#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOL_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h
index e0a5c8d9ad81..f8ac1655dc61 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumTypes.h
@@ -26,23 +26,20 @@ class NativeEnumTypes : public IPDBEnumChildren<PDBSymbol> {
public:
NativeEnumTypes(NativeSession &Session,
codeview::LazyRandomTypeCollection &TypeCollection,
- codeview::TypeLeafKind Kind);
+ std::vector<codeview::TypeLeafKind> Kinds);
+
+ NativeEnumTypes(NativeSession &Session,
+ std::vector<codeview::TypeIndex> Indices);
uint32_t getChildCount() const override;
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
std::unique_ptr<PDBSymbol> getNext() override;
void reset() override;
- NativeEnumTypes *clone() const override;
private:
- NativeEnumTypes(NativeSession &Session,
- const std::vector<codeview::TypeIndex> &Matches,
- codeview::TypeLeafKind Kind);
-
std::vector<codeview::TypeIndex> Matches;
uint32_t Index;
NativeSession &Session;
- codeview::TypeLeafKind Kind;
};
} // namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
index 587c7ff2b092..f4030da1d026 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
@@ -16,11 +16,14 @@
namespace llvm {
namespace pdb {
+class DbiStream;
+
class NativeExeSymbol : public NativeRawSymbol {
-public:
- NativeExeSymbol(NativeSession &Session, SymIndexId SymbolId);
+ // EXE symbol is the authority on the various symbol types.
+ DbiStream *Dbi = nullptr;
- std::unique_ptr<NativeRawSymbol> clone() const override;
+public:
+ NativeExeSymbol(NativeSession &Session, SymIndexId Id);
std::unique_ptr<IPDBEnumSymbols>
findChildren(PDB_SymType Type) const override;
@@ -30,9 +33,6 @@ public:
codeview::GUID getGuid() const override;
bool hasCTypes() const override;
bool hasPrivateSymbols() const override;
-
-private:
- PDBFile &File;
};
} // namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
index 5b70ecfa2056..6505a7d39573 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
@@ -19,15 +19,16 @@ namespace pdb {
class NativeSession;
-typedef uint32_t SymIndexId;
-
class NativeRawSymbol : public IPDBRawSymbol {
-public:
- NativeRawSymbol(NativeSession &PDBSession, SymIndexId SymbolId);
+ friend class SymbolCache;
+ virtual void initialize() {}
- virtual std::unique_ptr<NativeRawSymbol> clone() const = 0;
+public:
+ NativeRawSymbol(NativeSession &PDBSession, PDB_SymType Tag,
+ SymIndexId SymbolId);
- void dump(raw_ostream &OS, int Indent) const override;
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
std::unique_ptr<IPDBEnumSymbols>
findChildren(PDB_SymType Type) const override;
@@ -68,25 +69,25 @@ public:
uint32_t getAddressOffset() const override;
uint32_t getAddressSection() const override;
uint32_t getAge() const override;
- uint32_t getArrayIndexTypeId() const override;
+ SymIndexId getArrayIndexTypeId() const override;
uint32_t getBaseDataOffset() const override;
uint32_t getBaseDataSlot() const override;
- uint32_t getBaseSymbolId() const override;
+ SymIndexId getBaseSymbolId() const override;
PDB_BuiltinType getBuiltinType() const override;
uint32_t getBitPosition() const override;
PDB_CallingConv getCallingConvention() const override;
- uint32_t getClassParentId() const override;
+ SymIndexId getClassParentId() const override;
std::string getCompilerName() const override;
uint32_t getCount() const override;
uint32_t getCountLiveRanges() const override;
PDB_Lang getLanguage() const override;
- uint32_t getLexicalParentId() const override;
+ SymIndexId getLexicalParentId() const override;
std::string getLibraryName() const override;
uint32_t getLiveRangeStartAddressOffset() const override;
uint32_t getLiveRangeStartAddressSection() const override;
uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
codeview::RegisterId getLocalBasePointerRegisterId() const override;
- uint32_t getLowerBoundId() const override;
+ SymIndexId getLowerBoundId() const override;
uint32_t getMemorySpaceKind() const override;
std::string getName() const override;
uint32_t getNumberOfAcceleratorPointerTags() const override;
@@ -96,7 +97,7 @@ public:
uint32_t getNumberOfRows() const override;
std::string getObjectFileName() const override;
uint32_t getOemId() const override;
- uint32_t getOemSymbolId() const override;
+ SymIndexId getOemSymbolId() const override;
uint32_t getOffsetInUdt() const override;
PDB_Cpu getPlatform() const override;
uint32_t getRank() const override;
@@ -110,9 +111,9 @@ public:
std::string getSourceFileName() const override;
std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
uint32_t getStride() const override;
- uint32_t getSubTypeId() const override;
+ SymIndexId getSubTypeId() const override;
std::string getSymbolsFileName() const override;
- uint32_t getSymIndexId() const override;
+ SymIndexId getSymIndexId() const override;
uint32_t getTargetOffset() const override;
uint32_t getTargetRelativeVirtualAddress() const override;
uint64_t getTargetVirtualAddress() const override;
@@ -120,16 +121,16 @@ public:
uint32_t getTextureSlot() const override;
uint32_t getTimeStamp() const override;
uint32_t getToken() const override;
- uint32_t getTypeId() const override;
+ SymIndexId getTypeId() const override;
uint32_t getUavSlot() const override;
std::string getUndecoratedName() const override;
std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
- uint32_t getUnmodifiedTypeId() const override;
- uint32_t getUpperBoundId() const override;
+ SymIndexId getUnmodifiedTypeId() const override;
+ SymIndexId getUpperBoundId() const override;
Variant getValue() const override;
uint32_t getVirtualBaseDispIndex() const override;
uint32_t getVirtualBaseOffset() const override;
- uint32_t getVirtualTableShapeId() const override;
+ SymIndexId getVirtualTableShapeId() const override;
std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
@@ -230,6 +231,7 @@ public:
protected:
NativeSession &Session;
+ PDB_SymType Tag;
SymIndexId SymbolId;
};
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSession.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSession.h
index aff7ef2f8f21..4878e47d3121 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSession.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSession.h
@@ -15,9 +15,8 @@
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
-#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
-#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/SymbolCache.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
@@ -25,6 +24,7 @@ namespace llvm {
class MemoryBuffer;
namespace pdb {
class PDBFile;
+class NativeExeSymbol;
class NativeSession : public IPDBSession {
public:
@@ -37,21 +37,10 @@ public:
static Error createFromExe(StringRef Path,
std::unique_ptr<IPDBSession> &Session);
- std::unique_ptr<PDBSymbolCompiland>
- createCompilandSymbol(DbiModuleDescriptor MI);
-
- std::unique_ptr<PDBSymbolTypeEnum>
- createEnumSymbol(codeview::TypeIndex Index);
-
- std::unique_ptr<IPDBEnumSymbols>
- createTypeEnumerator(codeview::TypeLeafKind Kind);
-
- SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI);
-
uint64_t getLoadAddress() const override;
bool setLoadAddress(uint64_t Address) override;
std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
- std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const override;
+ std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const override;
bool addressForVA(uint64_t VA, uint32_t &Section,
uint32_t &Offset) const override;
@@ -104,14 +93,23 @@ public:
std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;
+ std::unique_ptr<IPDBEnumFrameData> getFrameData() const override;
+
PDBFile &getPDBFile() { return *Pdb; }
const PDBFile &getPDBFile() const { return *Pdb; }
+ NativeExeSymbol &getNativeGlobalScope() const;
+ SymbolCache &getSymbolCache() { return Cache; }
+ const SymbolCache &getSymbolCache() const { return Cache; }
+
private:
+ void initializeExeSymbol();
+
std::unique_ptr<PDBFile> Pdb;
std::unique_ptr<BumpPtrAllocator> Allocator;
- std::vector<std::unique_ptr<NativeRawSymbol>> SymbolCache;
- DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;
+
+ SymbolCache Cache;
+ SymIndexId ExeSymbol = 0;
};
} // namespace pdb
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h
new file mode 100644
index 000000000000..acc5eb8ff2c2
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeSymbolEnumerator.h
@@ -0,0 +1,51 @@
+//===- NativeSymbolEnumerator.h - info about enumerator values --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+class NativeTypeEnum;
+
+class NativeSymbolEnumerator : public NativeRawSymbol {
+public:
+ NativeSymbolEnumerator(NativeSession &Session, SymIndexId Id,
+ const NativeTypeEnum &Parent,
+ codeview::EnumeratorRecord Record);
+
+ ~NativeSymbolEnumerator() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ SymIndexId getClassParentId() const override;
+ SymIndexId getLexicalParentId() const override;
+ std::string getName() const override;
+ SymIndexId getTypeId() const override;
+ PDB_DataKind getDataKind() const override;
+ PDB_LocType getLocationType() const override;
+ bool isConstType() const override;
+ bool isVolatileType() const override;
+ bool isUnalignedType() const override;
+ Variant getValue() const override;
+
+protected:
+ const NativeTypeEnum &Parent;
+ codeview::EnumeratorRecord Record;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h
new file mode 100644
index 000000000000..10e68e6df450
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeArray.h
@@ -0,0 +1,50 @@
+//===- NativeTypeArray.h ------------------------------------------ C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEARRAY_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEARRAY_H
+
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeTypeArray : public NativeRawSymbol {
+public:
+ NativeTypeArray(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
+ codeview::ArrayRecord Record);
+ ~NativeTypeArray() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ SymIndexId getArrayIndexTypeId() const override;
+
+ bool isConstType() const override;
+ bool isUnalignedType() const override;
+ bool isVolatileType() const override;
+
+ uint32_t getCount() const override;
+ SymIndexId getTypeId() const override;
+ uint64_t getLength() const override;
+
+protected:
+ codeview::ArrayRecord Record;
+ codeview::TypeIndex Index;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h
index 4f532c6e3829..725dfb89222f 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeBuiltin.h
@@ -1,4 +1,4 @@
-//===- NativeBuiltinSymbol.h -------------------------------------- C++ -*-===//
+//===- NativeTypeBuiltin.h ---------------------------------------- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
-#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEBUILTIN_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEBUILTIN_H
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
@@ -19,15 +19,15 @@ namespace pdb {
class NativeSession;
-class NativeBuiltinSymbol : public NativeRawSymbol {
+class NativeTypeBuiltin : public NativeRawSymbol {
public:
- NativeBuiltinSymbol(NativeSession &PDBSession, SymIndexId Id,
- PDB_BuiltinType T, uint64_t L);
- ~NativeBuiltinSymbol() override;
+ NativeTypeBuiltin(NativeSession &PDBSession, SymIndexId Id,
+ codeview::ModifierOptions Mods, PDB_BuiltinType T,
+ uint64_t L);
+ ~NativeTypeBuiltin() override;
- virtual std::unique_ptr<NativeRawSymbol> clone() const override;
-
- void dump(raw_ostream &OS, int Indent) const override;
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
PDB_SymType getSymTag() const override;
@@ -39,6 +39,7 @@ public:
protected:
NativeSession &Session;
+ codeview::ModifierOptions Mods;
PDB_BuiltinType Type;
uint64_t Length;
};
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h
new file mode 100644
index 000000000000..a5cbefc18111
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeEnum.h
@@ -0,0 +1,75 @@
+//===- NativeTypeEnum.h - info about enum type ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypeBuiltin;
+
+class NativeTypeEnum : public NativeRawSymbol {
+public:
+ NativeTypeEnum(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
+ codeview::EnumRecord Record);
+
+ NativeTypeEnum(NativeSession &Session, SymIndexId Id,
+ NativeTypeEnum &UnmodifiedType,
+ codeview::ModifierRecord Modifier);
+ ~NativeTypeEnum() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type) const override;
+
+ PDB_BuiltinType getBuiltinType() const override;
+ PDB_SymType getSymTag() const override;
+ SymIndexId getUnmodifiedTypeId() const override;
+ bool hasConstructor() const override;
+ bool hasAssignmentOperator() const override;
+ bool hasCastOperator() const override;
+ uint64_t getLength() const override;
+ std::string getName() const override;
+ bool isConstType() const override;
+ bool isVolatileType() const override;
+ bool isUnalignedType() const override;
+ bool isNested() const override;
+ bool hasOverloadedOperator() const override;
+ bool hasNestedTypes() const override;
+ bool isIntrinsic() const override;
+ bool isPacked() const override;
+ bool isScoped() const override;
+ SymIndexId getTypeId() const override;
+ bool isRefUdt() const override;
+ bool isValueUdt() const override;
+ bool isInterfaceUdt() const override;
+
+ const NativeTypeBuiltin &getUnderlyingBuiltinType() const;
+ const codeview::EnumRecord &getEnumRecord() const { return *Record; }
+
+protected:
+ codeview::TypeIndex Index;
+ Optional<codeview::EnumRecord> Record;
+ NativeTypeEnum *UnmodifiedType = nullptr;
+ Optional<codeview::ModifierRecord> Modifiers;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h
new file mode 100644
index 000000000000..1b1b87f6581f
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeFunctionSig.h
@@ -0,0 +1,74 @@
+//===- NativeTypeFunctionSig.h - info about function signature ---*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypeUDT;
+
+class NativeTypeFunctionSig : public NativeRawSymbol {
+protected:
+ void initialize() override;
+
+public:
+ NativeTypeFunctionSig(NativeSession &Session, SymIndexId Id,
+ codeview::TypeIndex TI, codeview::ProcedureRecord Proc);
+
+ NativeTypeFunctionSig(NativeSession &Session, SymIndexId Id,
+ codeview::TypeIndex TI,
+ codeview::MemberFunctionRecord MemberFunc);
+
+ ~NativeTypeFunctionSig() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type) const override;
+
+ SymIndexId getClassParentId() const override;
+ PDB_CallingConv getCallingConvention() const override;
+ uint32_t getCount() const override;
+ SymIndexId getTypeId() const override;
+ int32_t getThisAdjust() const override;
+ bool hasConstructor() const override;
+ bool isConstType() const override;
+ bool isConstructorVirtualBase() const override;
+ bool isCxxReturnUdt() const override;
+ bool isUnalignedType() const override;
+ bool isVolatileType() const override;
+
+private:
+ void initializeArgList(codeview::TypeIndex ArgListTI);
+
+ union {
+ codeview::MemberFunctionRecord MemberFunc;
+ codeview::ProcedureRecord Proc;
+ };
+
+ SymIndexId ClassParentId = 0;
+ codeview::TypeIndex Index;
+ codeview::ArgListRecord ArgList;
+ bool IsMemberFunction = false;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h
new file mode 100644
index 000000000000..bcb7431fecf1
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypePointer.h
@@ -0,0 +1,61 @@
+//===- NativeTypePointer.h - info about pointer type -------------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypePointer : public NativeRawSymbol {
+public:
+ // Create a pointer record for a simple type.
+ NativeTypePointer(NativeSession &Session, SymIndexId Id,
+ codeview::TypeIndex TI);
+
+ // Create a pointer record for a non-simple type.
+ NativeTypePointer(NativeSession &Session, SymIndexId Id,
+ codeview::TypeIndex TI, codeview::PointerRecord PR);
+ ~NativeTypePointer() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ SymIndexId getClassParentId() const override;
+ bool isConstType() const override;
+ uint64_t getLength() const override;
+ bool isReference() const override;
+ bool isRValueReference() const override;
+ bool isPointerToDataMember() const override;
+ bool isPointerToMemberFunction() const override;
+ SymIndexId getTypeId() const override;
+ bool isRestrictedType() const override;
+ bool isVolatileType() const override;
+ bool isUnalignedType() const override;
+
+ bool isSingleInheritance() const override;
+ bool isMultipleInheritance() const override;
+ bool isVirtualInheritance() const override;
+
+protected:
+ bool isMemberPointer() const;
+ codeview::TypeIndex TI;
+ Optional<codeview::PointerRecord> Record;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h
new file mode 100644
index 000000000000..06eb6fcf3764
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeTypedef.h
@@ -0,0 +1,42 @@
+//===- NativeTypeTypedef.h - info about typedef ------------------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypeTypedef : public NativeRawSymbol {
+public:
+ // Create a pointer record for a non-simple type.
+ NativeTypeTypedef(NativeSession &Session, SymIndexId Id,
+ codeview::UDTSym Typedef);
+
+ ~NativeTypeTypedef() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ std::string getName() const override;
+ SymIndexId getTypeId() const override;
+
+protected:
+ codeview::UDTSym Record;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h
new file mode 100644
index 000000000000..84821d8731be
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeUDT.h
@@ -0,0 +1,74 @@
+//===- NativeTypeUDT.h - info about class/struct type ------------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypeUDT : public NativeRawSymbol {
+public:
+ NativeTypeUDT(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
+ codeview::ClassRecord Class);
+
+ NativeTypeUDT(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
+ codeview::UnionRecord Union);
+
+ NativeTypeUDT(NativeSession &Session, SymIndexId Id,
+ NativeTypeUDT &UnmodifiedType,
+ codeview::ModifierRecord Modifier);
+
+ ~NativeTypeUDT() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ std::string getName() const override;
+ SymIndexId getLexicalParentId() const override;
+ SymIndexId getUnmodifiedTypeId() const override;
+ SymIndexId getVirtualTableShapeId() const override;
+ uint64_t getLength() const override;
+ PDB_UdtType getUdtKind() const override;
+ bool hasConstructor() const override;
+ bool isConstType() const override;
+ bool hasAssignmentOperator() const override;
+ bool hasCastOperator() const override;
+ bool hasNestedTypes() const override;
+ bool hasOverloadedOperator() const override;
+ bool isInterfaceUdt() const override;
+ bool isIntrinsic() const override;
+ bool isNested() const override;
+ bool isPacked() const override;
+ bool isRefUdt() const override;
+ bool isScoped() const override;
+ bool isValueUdt() const override;
+ bool isUnalignedType() const override;
+ bool isVolatileType() const override;
+
+protected:
+ codeview::TypeIndex Index;
+
+ Optional<codeview::ClassRecord> Class;
+ Optional<codeview::UnionRecord> Union;
+ NativeTypeUDT *UnmodifiedType = nullptr;
+ codeview::TagRecord *Tag = nullptr;
+ Optional<codeview::ModifierRecord> Modifiers;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h
new file mode 100644
index 000000000000..a996f34ef859
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeTypeVTShape.h
@@ -0,0 +1,46 @@
+//===- NativeTypeVTShape.h - info about virtual table shape ------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeTypeVTShape : public NativeRawSymbol {
+public:
+ // Create a pointer record for a non-simple type.
+ NativeTypeVTShape(NativeSession &Session, SymIndexId Id,
+ codeview::TypeIndex TI, codeview::VFTableShapeRecord SR);
+
+ ~NativeTypeVTShape() override;
+
+ void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
+ PdbSymbolIdField RecurseIdFields) const override;
+
+ bool isConstType() const override;
+ bool isVolatileType() const override;
+ bool isUnalignedType() const override;
+ uint32_t getCount() const override;
+
+protected:
+ codeview::TypeIndex TI;
+ codeview::VFTableShapeRecord Record;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
index 7f9c4cf9fa83..37458749a8d8 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
@@ -53,7 +53,9 @@ public:
PDBStringTableBuilder &getStringTableBuilder();
GSIStreamBuilder &getGsiBuilder();
- Error commit(StringRef Filename);
+ // If HashPDBContentsToGUID is true on the InfoStreamBuilder, Guid is filled
+ // with the computed PDB GUID on return.
+ Error commit(StringRef Filename, codeview::GUID *Guid);
Expected<uint32_t> getNamedStreamIndex(StringRef Name) const;
Error addNamedStream(StringRef Name, StringRef Data);
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawError.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawError.h
index 3624a7682e38..97d11b4f20d1 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawError.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawError.h
@@ -31,23 +31,29 @@ enum class raw_error_code {
stream_too_long,
invalid_tpi_hash,
};
+} // namespace pdb
+} // namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::pdb::raw_error_code> : std::true_type {};
+} // namespace std
+
+namespace llvm {
+namespace pdb {
+const std::error_category &RawErrCategory();
+
+inline std::error_code make_error_code(raw_error_code E) {
+ return std::error_code(static_cast<int>(E), RawErrCategory());
+}
/// Base class for errors originating when parsing raw PDB files
-class RawError : public ErrorInfo<RawError> {
+class RawError : public ErrorInfo<RawError, StringError> {
public:
+ using ErrorInfo<RawError, StringError>::ErrorInfo; // inherit constructors
+ RawError(const Twine &S) : ErrorInfo(S, raw_error_code::unspecified) {}
static char ID;
- RawError(raw_error_code C);
- RawError(const std::string &Context);
- RawError(raw_error_code C, const std::string &Context);
-
- void log(raw_ostream &OS) const override;
- const std::string &getErrorMessage() const;
- std::error_code convertToErrorCode() const override;
-
-private:
- std::string ErrMsg;
- raw_error_code Code;
};
-}
-}
+} // namespace pdb
+} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 19f592d562e4..8f6d6611c032 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -343,7 +343,6 @@ struct SrcHeaderBlockEntry {
char Reserved[8];
};
-constexpr int I = sizeof(SrcHeaderBlockEntry);
static_assert(sizeof(SrcHeaderBlockEntry) == 40, "Incorrect struct size!");
} // namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/SymbolCache.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/SymbolCache.h
new file mode 100644
index 000000000000..08e1d41e6ee9
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/SymbolCache.h
@@ -0,0 +1,148 @@
+//==- SymbolCache.h - Cache of native symbols and ids ------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLCACHE_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLCACHE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+#include "llvm/Support/Allocator.h"
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+class DbiStream;
+class PDBFile;
+
+class SymbolCache {
+ NativeSession &Session;
+ DbiStream *Dbi = nullptr;
+
+ /// Cache of all stable symbols, indexed by SymIndexId. Just because a
+ /// symbol has been parsed does not imply that it will be stable and have
+ /// an Id. Id allocation is an implementation detail, with the only guarantee
+ /// being that once an Id is allocated, the symbol can be assumed to be
+ /// cached.
+ std::vector<std::unique_ptr<NativeRawSymbol>> Cache;
+
+ /// For type records from the TPI stream which have been parsed and cached,
+ /// stores a mapping to SymIndexId of the cached symbol.
+ DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;
+
+ /// For field list members which have been parsed and cached, stores a mapping
+ /// from (IndexOfClass, MemberIndex) to the corresponding SymIndexId of the
+ /// cached symbol.
+ DenseMap<std::pair<codeview::TypeIndex, uint32_t>, SymIndexId>
+ FieldListMembersToSymbolId;
+
+ /// List of SymIndexIds for each compiland, indexed by compiland index as they
+ /// appear in the PDB file.
+ std::vector<SymIndexId> Compilands;
+
+ /// Map from global symbol offset to SymIndexId.
+ DenseMap<uint32_t, SymIndexId> GlobalOffsetToSymbolId;
+
+ SymIndexId createSymbolPlaceholder() {
+ SymIndexId Id = Cache.size();
+ Cache.push_back(nullptr);
+ return Id;
+ }
+
+ template <typename ConcreteSymbolT, typename CVRecordT, typename... Args>
+ SymIndexId createSymbolForType(codeview::TypeIndex TI, codeview::CVType CVT,
+ Args &&... ConstructorArgs) {
+ CVRecordT Record;
+ if (auto EC =
+ codeview::TypeDeserializer::deserializeAs<CVRecordT>(CVT, Record)) {
+ consumeError(std::move(EC));
+ return 0;
+ }
+
+ return createSymbol<ConcreteSymbolT>(
+ TI, std::move(Record), std::forward<Args>(ConstructorArgs)...);
+ }
+
+ SymIndexId createSymbolForModifiedType(codeview::TypeIndex ModifierTI,
+ codeview::CVType CVT);
+
+ SymIndexId createSimpleType(codeview::TypeIndex TI,
+ codeview::ModifierOptions Mods);
+
+public:
+ SymbolCache(NativeSession &Session, DbiStream *Dbi);
+
+ template <typename ConcreteSymbolT, typename... Args>
+ SymIndexId createSymbol(Args &&... ConstructorArgs) {
+ SymIndexId Id = Cache.size();
+
+ // Initial construction must not access the cache, since it must be done
+ // atomically.
+ auto Result = llvm::make_unique<ConcreteSymbolT>(
+ Session, Id, std::forward<Args>(ConstructorArgs)...);
+ Result->SymbolId = Id;
+
+ NativeRawSymbol *NRS = static_cast<NativeRawSymbol *>(Result.get());
+ Cache.push_back(std::move(Result));
+
+ // After the item is in the cache, we can do further initialization which
+ // is then allowed to access the cache.
+ NRS->initialize();
+ return Id;
+ }
+
+ std::unique_ptr<IPDBEnumSymbols>
+ createTypeEnumerator(codeview::TypeLeafKind Kind);
+
+ std::unique_ptr<IPDBEnumSymbols>
+ createTypeEnumerator(std::vector<codeview::TypeLeafKind> Kinds);
+
+ std::unique_ptr<IPDBEnumSymbols>
+ createGlobalsEnumerator(codeview::SymbolKind Kind);
+
+ SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI);
+
+ template <typename ConcreteSymbolT, typename... Args>
+ SymIndexId getOrCreateFieldListMember(codeview::TypeIndex FieldListTI,
+ uint32_t Index,
+ Args &&... ConstructorArgs) {
+ SymIndexId SymId = Cache.size();
+ std::pair<codeview::TypeIndex, uint32_t> Key{FieldListTI, Index};
+ auto Result = FieldListMembersToSymbolId.try_emplace(Key, SymId);
+ if (Result.second)
+ SymId =
+ createSymbol<ConcreteSymbolT>(std::forward<Args>(ConstructorArgs)...);
+ else
+ SymId = Result.first->second;
+ return SymId;
+ }
+
+ SymIndexId getOrCreateGlobalSymbolByOffset(uint32_t Offset);
+
+ std::unique_ptr<PDBSymbolCompiland> getOrCreateCompiland(uint32_t Index);
+ uint32_t getNumCompilands() const;
+
+ std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const;
+
+ NativeRawSymbol &getNativeSymbolById(SymIndexId SymbolId) const;
+
+ template <typename ConcreteT>
+ ConcreteT &getNativeSymbolById(SymIndexId SymbolId) const {
+ return static_cast<ConcreteT &>(getNativeSymbolById(SymbolId));
+ }
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiHashing.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiHashing.h
index c1edec7a26fe..c2996ccf1825 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiHashing.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiHashing.h
@@ -18,6 +18,54 @@ namespace pdb {
Expected<uint32_t> hashTypeRecord(const llvm::codeview::CVType &Type);
+struct TagRecordHash {
+ explicit TagRecordHash(codeview::ClassRecord CR, uint32_t Full,
+ uint32_t Forward)
+ : FullRecordHash(Full), ForwardDeclHash(Forward), Class(std::move(CR)) {
+ State = 0;
+ }
+
+ explicit TagRecordHash(codeview::EnumRecord ER, uint32_t Full,
+ uint32_t Forward)
+ : FullRecordHash(Full), ForwardDeclHash(Forward), Enum(std::move(ER)) {
+ State = 1;
+ }
+
+ explicit TagRecordHash(codeview::UnionRecord UR, uint32_t Full,
+ uint32_t Forward)
+ : FullRecordHash(Full), ForwardDeclHash(Forward), Union(std::move(UR)) {
+ State = 2;
+ }
+
+ uint32_t FullRecordHash;
+ uint32_t ForwardDeclHash;
+
+ codeview::TagRecord &getRecord() {
+ switch (State) {
+ case 0:
+ return Class;
+ case 1:
+ return Enum;
+ case 2:
+ return Union;
+ }
+ llvm_unreachable("unreachable!");
+ }
+
+private:
+ union {
+ codeview::ClassRecord Class;
+ codeview::EnumRecord Enum;
+ codeview::UnionRecord Union;
+ };
+
+ uint8_t State = 0;
+};
+
+/// Given a CVType referring to a class, structure, union, or enum, compute
+/// the hash of its forward decl and full decl.
+Expected<TagRecordHash> hashTagRecord(const codeview::CVType &Type);
+
} // end namespace pdb
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
index b77939929ecf..b76576a7a263 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
@@ -58,10 +58,21 @@ public:
codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }
+ Expected<codeview::TypeIndex>
+ findFullDeclForForwardRef(codeview::TypeIndex ForwardRefTI) const;
+
+ std::vector<codeview::TypeIndex> findRecordsByName(StringRef Name) const;
+
+ codeview::CVType getType(codeview::TypeIndex Index);
+
BinarySubstreamRef getTypeRecordsSubstream() const;
Error commit();
+ void buildHashMap();
+
+ bool supportsTypeLookup() const;
+
private:
PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
@@ -77,6 +88,8 @@ private:
FixedStreamArray<codeview::TypeIndexOffset> TypeIndexOffsets;
HashTable<support::ulittle32_t> HashAdjusters;
+ std::vector<std::vector<codeview::TypeIndex>> HashMap;
+
const TpiStreamHeader *Header;
};
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h
index 3c9a19801f89..aaec71aa8c90 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h
@@ -12,6 +12,8 @@
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/Support/raw_ostream.h"
+
#include <unordered_map>
namespace llvm {
@@ -24,6 +26,7 @@ using TagStats = std::unordered_map<PDB_SymType, int>;
raw_ostream &operator<<(raw_ostream &OS, const PDB_VariantType &Value);
raw_ostream &operator<<(raw_ostream &OS, const PDB_CallingConv &Conv);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_BuiltinType &Type);
raw_ostream &operator<<(raw_ostream &OS, const PDB_DataKind &Data);
raw_ostream &operator<<(raw_ostream &OS, const codeview::RegisterId &Reg);
raw_ostream &operator<<(raw_ostream &OS, const PDB_LocType &Loc);
@@ -41,6 +44,15 @@ raw_ostream &operator<<(raw_ostream &OS, const Variant &Value);
raw_ostream &operator<<(raw_ostream &OS, const VersionInfo &Version);
raw_ostream &operator<<(raw_ostream &OS, const TagStats &Stats);
+
+template <typename T>
+void dumpSymbolField(raw_ostream &OS, StringRef Name, T Value, int Indent) {
+ OS << "\n";
+ OS.indent(Indent);
+ OS << Name << ": " << Value;
+}
+
+
} // end namespace pdb
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h
index 04373463212b..3a74f7c3aace 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h
@@ -49,9 +49,22 @@ class IPDBRawSymbol;
class IPDBSession;
#define DECLARE_PDB_SYMBOL_CONCRETE_TYPE(TagValue) \
+private: \
+ using PDBSymbol::PDBSymbol; \
+ friend class PDBSymbol; \
+ \
+public: \
static const PDB_SymType Tag = TagValue; \
static bool classof(const PDBSymbol *S) { return S->getSymTag() == Tag; }
+#define DECLARE_PDB_SYMBOL_CUSTOM_TYPE(Condition) \
+private: \
+ using PDBSymbol::PDBSymbol; \
+ friend class PDBSymbol; \
+ \
+public: \
+ static bool classof(const PDBSymbol *S) { return Condition; }
+
/// PDBSymbol defines the base of the inheritance hierarchy for concrete symbol
/// types (e.g. functions, executables, vtables, etc). All concrete symbol
/// types inherit from PDBSymbol and expose the exact set of methods that are
@@ -59,14 +72,33 @@ class IPDBSession;
/// reference "Lexical and Class Hierarchy of Symbol Types":
/// https://msdn.microsoft.com/en-us/library/370hs6k4.aspx
class PDBSymbol {
+ static std::unique_ptr<PDBSymbol> createSymbol(const IPDBSession &PDBSession,
+ PDB_SymType Tag);
+
protected:
- PDBSymbol(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
- PDBSymbol(PDBSymbol &Symbol);
+ explicit PDBSymbol(const IPDBSession &PDBSession);
+ PDBSymbol(PDBSymbol &&Other);
public:
static std::unique_ptr<PDBSymbol>
- create(const IPDBSession &PDBSession, std::unique_ptr<IPDBRawSymbol> Symbol);
+ create(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> RawSymbol);
+ static std::unique_ptr<PDBSymbol> create(const IPDBSession &PDBSession,
+ IPDBRawSymbol &RawSymbol);
+
+ template <typename ConcreteT>
+ static std::unique_ptr<ConcreteT>
+ createAs(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> RawSymbol) {
+ std::unique_ptr<PDBSymbol> S = create(PDBSession, std::move(RawSymbol));
+ return unique_dyn_cast_or_null<ConcreteT>(std::move(S));
+ }
+ template <typename ConcreteT>
+ static std::unique_ptr<ConcreteT> createAs(const IPDBSession &PDBSession,
+ IPDBRawSymbol &RawSymbol) {
+ std::unique_ptr<PDBSymbol> S = create(PDBSession, RawSymbol);
+ return unique_dyn_cast_or_null<ConcreteT>(std::move(S));
+ }
virtual ~PDBSymbol();
@@ -80,7 +112,8 @@ public:
/// normally goes on the right side of the symbol.
virtual void dumpRight(PDBSymDumper &Dumper) const {}
- void defaultDump(raw_ostream &OS, int Indent) const;
+ void defaultDump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowFlags,
+ PdbSymbolIdField RecurseFlags) const;
void dumpProperties() const;
void dumpChildStats() const;
@@ -94,8 +127,6 @@ public:
return Enumerator->getNext();
}
- std::unique_ptr<PDBSymbol> clone() const;
-
template <typename T>
std::unique_ptr<ConcreteSymbolEnumerator<T>> findAllChildren() const {
auto BaseIter = RawSymbol->findChildren(T::Tag);
@@ -131,7 +162,8 @@ protected:
}
const IPDBSession &Session;
- std::unique_ptr<IPDBRawSymbol> RawSymbol;
+ std::unique_ptr<IPDBRawSymbol> OwnedRawSymbol;
+ IPDBRawSymbol *RawSymbol = nullptr;
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
index 3169146e5b12..ef00df15cb0a 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
@@ -18,12 +18,9 @@ class raw_ostream;
namespace pdb {
class PDBSymbolAnnotation : public PDBSymbol {
-public:
- PDBSymbolAnnotation(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Annotation)
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
index d81da1eaa023..2cf9c72a8886 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolBlock : public PDBSymbol {
-public:
- PDBSymbolBlock(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Block)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
index 9549089c7eb4..04dbd962ebd4 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
@@ -20,12 +20,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolCompiland : public PDBSymbol {
-public:
- PDBSymbolCompiland(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> CompilandSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Compiland)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
index dba50c42cf81..3d651a464d94 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolCompilandDetails : public PDBSymbol {
-public:
- PDBSymbolCompilandDetails(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandDetails)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
void getFrontEndVersion(VersionInfo &Version) const {
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
index 7868f0459086..ffc408314d9a 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
@@ -18,12 +18,8 @@ namespace llvm {
class raw_ostream;
namespace pdb {
class PDBSymbolCompilandEnv : public PDBSymbol {
-public:
- PDBSymbolCompilandEnv(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandEnv)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
index 54f089404262..c29e4c31d3f3 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
@@ -23,12 +23,8 @@ namespace pdb {
/// fit anywhere else in the lexical hierarchy.
/// https://msdn.microsoft.com/en-us/library/d88sf09h.aspx
class PDBSymbolCustom : public PDBSymbol {
-public:
- PDBSymbolCustom(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> CustomSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Custom)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes);
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h
index 76b14bf17784..217e1e976e6b 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h
@@ -21,12 +21,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolData : public PDBSymbol {
-public:
- PDBSymbolData(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> DataSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Data)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAccess)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
index 2c2d74665040..366d0cf4777f 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
@@ -20,12 +20,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolExe : public PDBSymbol {
-public:
- PDBSymbolExe(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> ExeSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Exe)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAge)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
index 05d585d25763..129e557c7f25 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
@@ -22,18 +22,14 @@ class raw_ostream;
namespace pdb {
class PDBSymbolFunc : public PDBSymbol {
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Function)
public:
- PDBSymbolFunc(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> FuncSymbol);
-
void dump(PDBSymDumper &Dumper) const override;
bool isDestructor() const;
std::unique_ptr<IPDBEnumChildren<PDBSymbolData>> getArguments() const;
- DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Function)
-
FORWARD_SYMBOL_METHOD(getAccess)
FORWARD_SYMBOL_METHOD(getAddressOffset)
FORWARD_SYMBOL_METHOD(getAddressSection)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
index 3341bd9b30fd..18db8a50fd1b 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
@@ -20,12 +20,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolFuncDebugEnd : public PDBSymbol {
-public:
- PDBSymbolFuncDebugEnd(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> FuncDebugEndSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugEnd)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
index 6729838597c8..83d82f0cbcc5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolFuncDebugStart : public PDBSymbol {
-public:
- PDBSymbolFuncDebugStart(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> FuncDebugStartSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugStart)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
index c2b1c28c929e..8b2617fcd757 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolLabel : public PDBSymbol {
-public:
- PDBSymbolLabel(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> LabelSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Label)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
index c9e6ee67c575..9def3edb469a 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolPublicSymbol : public PDBSymbol {
-public:
- PDBSymbolPublicSymbol(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> PublicSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PublicSymbol)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAddressOffset)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
index 614fad86caa8..7bb0555362db 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolThunk : public PDBSymbol {
-public:
- PDBSymbolThunk(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> ThunkSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Thunk)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAccess)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
index 39b7d3b300ea..488f668bdc10 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeArray : public PDBSymbol {
-public:
- PDBSymbolTypeArray(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> ArrayTypeSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ArrayType)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
void dumpRight(PDBSymDumper &Dumper) const override;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
index d607a3d81170..550deedd7504 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
@@ -22,12 +22,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeBaseClass : public PDBSymbol {
-public:
- PDBSymbolTypeBaseClass(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BaseClass)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getAccess)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
index 5b1863c42a04..e07e88802b8f 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeBuiltin : public PDBSymbol {
-public:
- PDBSymbolTypeBuiltin(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BuiltinType)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getBuiltinType)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
index 199b3f8b304e..0d8979c9c5c5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeCustom : public PDBSymbol {
-public:
- PDBSymbolTypeCustom(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CustomType)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getOemId)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
index e635eb5bbf6f..58292a63501f 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeDimension : public PDBSymbol {
-public:
- PDBSymbolTypeDimension(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Dimension)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getLowerBoundId)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
index ddbe7e58f183..f463047bb5b5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
@@ -21,12 +21,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeEnum : public PDBSymbol {
-public:
- PDBSymbolTypeEnum(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> EnumTypeSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Enum)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getBuiltinType)
@@ -38,6 +34,7 @@ public:
FORWARD_SYMBOL_METHOD(hasNestedTypes)
FORWARD_SYMBOL_METHOD(getLength)
FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
+ FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
FORWARD_SYMBOL_METHOD(getName)
FORWARD_SYMBOL_METHOD(getSrcLineOnTypeDefn)
FORWARD_SYMBOL_METHOD(isNested)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
index 24c13128111f..5b940b0737af 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeFriend : public PDBSymbol {
-public:
- PDBSymbolTypeFriend(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Friend)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getClassParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
index 3855999c473f..074cb418fc82 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeFunctionArg : public PDBSymbol {
-public:
- PDBSymbolTypeFunctionArg(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionArg)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getClassParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
index abd4cf5effa2..dfdf436197c3 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeFunctionSig : public PDBSymbol {
-public:
- PDBSymbolTypeFunctionSig(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionSig)
-
+public:
std::unique_ptr<IPDBEnumSymbols> getArguments() const;
void dump(PDBSymDumper &Dumper) const override;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
index 31cf5363dde1..d716abd640c6 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeManaged : public PDBSymbol {
-public:
- PDBSymbolTypeManaged(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ManagedType)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getName)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
index 7612ebac31dd..300d6722fc4d 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
@@ -19,16 +19,13 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypePointer : public PDBSymbol {
-public:
- PDBSymbolTypePointer(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PointerType)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
void dumpRight(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_ID_METHOD(getClassParent)
FORWARD_SYMBOL_METHOD(getLength)
FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
FORWARD_SYMBOL_METHOD(isReference)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
index 16c1d1b88c6d..d6e2a36486d5 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeTypedef : public PDBSymbol {
-public:
- PDBSymbolTypeTypedef(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Typedef)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(getBuiltinType)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
index e259b6dca3d5..937dd6c87221 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
@@ -23,17 +23,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeUDT : public PDBSymbol {
-public:
- PDBSymbolTypeUDT(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> UDTSymbol);
-
- std::unique_ptr<PDBSymbolTypeUDT> clone() const {
- return getSession().getConcreteSymbolById<PDBSymbolTypeUDT>(
- getSymIndexId());
- }
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UDT)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getClassParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
index e270c2b7eb95..6efce4bbd686 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeVTable : public PDBSymbol {
-public:
- PDBSymbolTypeVTable(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> VtblSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTable)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getClassParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
index 8acaabea5bb8..8949052b0c0f 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
@@ -19,12 +19,8 @@ class raw_ostream;
namespace pdb {
class PDBSymbolTypeVTableShape : public PDBSymbol {
-public:
- PDBSymbolTypeVTableShape(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> VtblShapeSymbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTableShape)
-
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_METHOD(isConstType)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
index de43e47badbd..e935ac6ce0dc 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
@@ -18,16 +18,11 @@ class raw_ostream;
namespace pdb {
class PDBSymbolUnknown : public PDBSymbol {
-public:
- PDBSymbolUnknown(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> UnknownSymbol);
+ DECLARE_PDB_SYMBOL_CUSTOM_TYPE(S->getSymTag() == PDB_SymType::None ||
+ S->getSymTag() >= PDB_SymType::Max)
+public:
void dump(PDBSymDumper &Dumper) const override;
-
- static bool classof(const PDBSymbol *S) {
- return (S->getSymTag() == PDB_SymType::None ||
- S->getSymTag() >= PDB_SymType::Max);
- }
};
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
index 70fbd5b84c34..4e8c99fc8d89 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
@@ -19,12 +19,9 @@ class raw_ostream;
namespace pdb {
class PDBSymbolUsingNamespace : public PDBSymbol {
-public:
- PDBSymbolUsingNamespace(const IPDBSession &PDBSession,
- std::unique_ptr<IPDBRawSymbol> Symbol);
-
DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UsingNamespace)
+public:
void dump(PDBSymDumper &Dumper) const override;
FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h
index da6cb1d26771..917f3ed73910 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h
@@ -12,6 +12,7 @@
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include <cctype>
#include <cstddef>
@@ -22,6 +23,8 @@
namespace llvm {
namespace pdb {
+typedef uint32_t SymIndexId;
+
class IPDBDataStream;
class IPDBInjectedSource;
class IPDBLineNumber;
@@ -69,6 +72,7 @@ using IPDBEnumLineNumbers = IPDBEnumChildren<IPDBLineNumber>;
using IPDBEnumTables = IPDBEnumChildren<IPDBTable>;
using IPDBEnumInjectedSources = IPDBEnumChildren<IPDBInjectedSource>;
using IPDBEnumSectionContribs = IPDBEnumChildren<IPDBSectionContrib>;
+using IPDBEnumFrameData = IPDBEnumChildren<IPDBFrameData>;
/// Specifies which PDB reader implementation is to be used. Only a value
/// of PDB_ReaderType::DIA is currently supported, but Native is in the works.
@@ -208,6 +212,18 @@ enum class PDB_SymType {
CustomType,
ManagedType,
Dimension,
+ CallSite,
+ InlineSite,
+ BaseInterface,
+ VectorType,
+ MatrixType,
+ HLSLType,
+ Caller,
+ Callee,
+ Export,
+ HeapAllocationSite,
+ CoffGroup,
+ Inlinee,
Max
};
@@ -334,6 +350,36 @@ enum PDB_VariantType {
struct Variant {
Variant() = default;
+ explicit Variant(bool V) : Type(PDB_VariantType::Bool) { Value.Bool = V; }
+ explicit Variant(int8_t V) : Type(PDB_VariantType::Int8) { Value.Int8 = V; }
+ explicit Variant(int16_t V) : Type(PDB_VariantType::Int16) {
+ Value.Int16 = V;
+ }
+ explicit Variant(int32_t V) : Type(PDB_VariantType::Int32) {
+ Value.Int32 = V;
+ }
+ explicit Variant(int64_t V) : Type(PDB_VariantType::Int64) {
+ Value.Int64 = V;
+ }
+ explicit Variant(float V) : Type(PDB_VariantType::Single) {
+ Value.Single = V;
+ }
+ explicit Variant(double V) : Type(PDB_VariantType::Double) {
+ Value.Double = V;
+ }
+ explicit Variant(uint8_t V) : Type(PDB_VariantType::UInt8) {
+ Value.UInt8 = V;
+ }
+ explicit Variant(uint16_t V) : Type(PDB_VariantType::UInt16) {
+ Value.UInt16 = V;
+ }
+ explicit Variant(uint32_t V) : Type(PDB_VariantType::UInt32) {
+ Value.UInt32 = V;
+ }
+ explicit Variant(uint64_t V) : Type(PDB_VariantType::UInt64) {
+ Value.UInt64 = V;
+ }
+
Variant(const Variant &Other) {
*this = Other;
}
diff --git a/contrib/llvm/include/llvm/Demangle/Compiler.h b/contrib/llvm/include/llvm/Demangle/Compiler.h
new file mode 100644
index 000000000000..248d6e3a7faa
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/Compiler.h
@@ -0,0 +1,93 @@
+//===--- Compiler.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+// This file contains a variety of feature test macros copied from
+// include/llvm/Support/Compiler.h so that LLVMDemangle does not need to take
+// a dependency on LLVMSupport.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_COMPILER_H
+#define LLVM_DEMANGLE_COMPILER_H
+
+#ifdef _MSC_VER
+// snprintf is implemented in VS 2015
+#if _MSC_VER < 1900
+#define snprintf _snprintf_s
+#endif
+#endif
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#ifndef __has_cpp_attribute
+#define __has_cpp_attribute(x) 0
+#endif
+
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#ifndef LLVM_GNUC_PREREQ
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define LLVM_GNUC_PREREQ(maj, min, patch) \
+ ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
+ ((maj) << 20) + ((min) << 10) + (patch))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define LLVM_GNUC_PREREQ(maj, min, patch) \
+ ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
+#else
+#define LLVM_GNUC_PREREQ(maj, min, patch) 0
+#endif
+#endif
+
+#if __has_attribute(used) || LLVM_GNUC_PREREQ(3, 1, 0)
+#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
+#else
+#define LLVM_ATTRIBUTE_USED
+#endif
+
+#if __has_builtin(__builtin_unreachable) || LLVM_GNUC_PREREQ(4, 5, 0)
+#define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define LLVM_BUILTIN_UNREACHABLE __assume(false)
+#endif
+
+#if __has_attribute(noinline) || LLVM_GNUC_PREREQ(3, 4, 0)
+#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+#define LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
+#else
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
+#define LLVM_FALLTHROUGH [[fallthrough]]
+#elif __has_cpp_attribute(gnu::fallthrough)
+#define LLVM_FALLTHROUGH [[gnu::fallthrough]]
+#elif !__cplusplus
+// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
+// error when __has_cpp_attribute is given a scoped attribute in C mode.
+#define LLVM_FALLTHROUGH
+#elif __has_cpp_attribute(clang::fallthrough)
+#define LLVM_FALLTHROUGH [[clang::fallthrough]]
+#else
+#define LLVM_FALLTHROUGH
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm/Demangle/Demangle.h b/contrib/llvm/include/llvm/Demangle/Demangle.h
index df7753f23b87..4c9dc9569e18 100644
--- a/contrib/llvm/include/llvm/Demangle/Demangle.h
+++ b/contrib/llvm/include/llvm/Demangle/Demangle.h
@@ -7,6 +7,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEMANGLE_DEMANGLE_H
+#define LLVM_DEMANGLE_DEMANGLE_H
+
#include <cstddef>
namespace llvm {
@@ -27,8 +30,11 @@ enum : int {
char *itaniumDemangle(const char *mangled_name, char *buf, size_t *n,
int *status);
+
+
+enum MSDemangleFlags { MSDF_None = 0, MSDF_DumpBackrefs = 1 << 0 };
char *microsoftDemangle(const char *mangled_name, char *buf, size_t *n,
- int *status);
+ int *status, MSDemangleFlags Flags = MSDF_None);
/// "Partial" demangler. This supports demangling a string into an AST
/// (typically an intermediate stage in itaniumDemangle) and querying certain
@@ -86,3 +92,5 @@ private:
void *Context;
};
} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Demangle/ItaniumDemangle.h b/contrib/llvm/include/llvm/Demangle/ItaniumDemangle.h
new file mode 100644
index 000000000000..0b9187f30a5a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/ItaniumDemangle.h
@@ -0,0 +1,5184 @@
+//===------------------------- ItaniumDemangle.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_ITANIUMDEMANGLE_H
+#define LLVM_DEMANGLE_ITANIUMDEMANGLE_H
+
+// FIXME: (possibly) incomplete list of features that clang mangles that this
+// file does not yet support:
+// - C++ modules TS
+
+#include "llvm/Demangle/Compiler.h"
+#include "llvm/Demangle/StringView.h"
+#include "llvm/Demangle/Utility.h"
+
+#include <cassert>
+#include <cctype>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <numeric>
+#include <utility>
+
+#define FOR_EACH_NODE_KIND(X) \
+ X(NodeArrayNode) \
+ X(DotSuffix) \
+ X(VendorExtQualType) \
+ X(QualType) \
+ X(ConversionOperatorType) \
+ X(PostfixQualifiedType) \
+ X(ElaboratedTypeSpefType) \
+ X(NameType) \
+ X(AbiTagAttr) \
+ X(EnableIfAttr) \
+ X(ObjCProtoName) \
+ X(PointerType) \
+ X(ReferenceType) \
+ X(PointerToMemberType) \
+ X(ArrayType) \
+ X(FunctionType) \
+ X(NoexceptSpec) \
+ X(DynamicExceptionSpec) \
+ X(FunctionEncoding) \
+ X(LiteralOperator) \
+ X(SpecialName) \
+ X(CtorVtableSpecialName) \
+ X(QualifiedName) \
+ X(NestedName) \
+ X(LocalName) \
+ X(VectorType) \
+ X(PixelVectorType) \
+ X(ParameterPack) \
+ X(TemplateArgumentPack) \
+ X(ParameterPackExpansion) \
+ X(TemplateArgs) \
+ X(ForwardTemplateReference) \
+ X(NameWithTemplateArgs) \
+ X(GlobalQualifiedName) \
+ X(StdQualifiedName) \
+ X(ExpandedSpecialSubstitution) \
+ X(SpecialSubstitution) \
+ X(CtorDtorName) \
+ X(DtorName) \
+ X(UnnamedTypeName) \
+ X(ClosureTypeName) \
+ X(StructuredBindingName) \
+ X(BinaryExpr) \
+ X(ArraySubscriptExpr) \
+ X(PostfixExpr) \
+ X(ConditionalExpr) \
+ X(MemberExpr) \
+ X(EnclosingExpr) \
+ X(CastExpr) \
+ X(SizeofParamPackExpr) \
+ X(CallExpr) \
+ X(NewExpr) \
+ X(DeleteExpr) \
+ X(PrefixExpr) \
+ X(FunctionParam) \
+ X(ConversionExpr) \
+ X(InitListExpr) \
+ X(FoldExpr) \
+ X(ThrowExpr) \
+ X(BoolExpr) \
+ X(IntegerCastExpr) \
+ X(IntegerLiteral) \
+ X(FloatLiteral) \
+ X(DoubleLiteral) \
+ X(LongDoubleLiteral) \
+ X(BracedExpr) \
+ X(BracedRangeExpr)
+
+namespace llvm {
+namespace itanium_demangle {
+// Base class of all AST nodes. The AST is built by the parser, then is
+// traversed by the printLeft/Right functions to produce a demangled string.
+class Node {
+public:
+ enum Kind : unsigned char {
+#define ENUMERATOR(NodeKind) K ## NodeKind,
+ FOR_EACH_NODE_KIND(ENUMERATOR)
+#undef ENUMERATOR
+ };
+
+ /// Three-way bool to track a cached value. Unknown is possible if this node
+ /// has an unexpanded parameter pack below it that may affect this cache.
+ enum class Cache : unsigned char { Yes, No, Unknown, };
+
+private:
+ Kind K;
+
+ // FIXME: Make these protected.
+public:
+ /// Tracks if this node has a component on its right side, in which case we
+ /// need to call printRight.
+ Cache RHSComponentCache;
+
+ /// Track if this node is a (possibly qualified) array type. This can affect
+ /// how we format the output string.
+ Cache ArrayCache;
+
+ /// Track if this node is a (possibly qualified) function type. This can
+ /// affect how we format the output string.
+ Cache FunctionCache;
+
+public:
+ Node(Kind K_, Cache RHSComponentCache_ = Cache::No,
+ Cache ArrayCache_ = Cache::No, Cache FunctionCache_ = Cache::No)
+ : K(K_), RHSComponentCache(RHSComponentCache_), ArrayCache(ArrayCache_),
+ FunctionCache(FunctionCache_) {}
+
+ /// Visit the most-derived object corresponding to this object.
+ template<typename Fn> void visit(Fn F) const;
+
+ // The following function is provided by all derived classes:
+ //
+ // Call F with arguments that, when passed to the constructor of this node,
+ // would construct an equivalent node.
+ //template<typename Fn> void match(Fn F) const;
+
+ bool hasRHSComponent(OutputStream &S) const {
+ if (RHSComponentCache != Cache::Unknown)
+ return RHSComponentCache == Cache::Yes;
+ return hasRHSComponentSlow(S);
+ }
+
+ bool hasArray(OutputStream &S) const {
+ if (ArrayCache != Cache::Unknown)
+ return ArrayCache == Cache::Yes;
+ return hasArraySlow(S);
+ }
+
+ bool hasFunction(OutputStream &S) const {
+ if (FunctionCache != Cache::Unknown)
+ return FunctionCache == Cache::Yes;
+ return hasFunctionSlow(S);
+ }
+
+ Kind getKind() const { return K; }
+
+ virtual bool hasRHSComponentSlow(OutputStream &) const { return false; }
+ virtual bool hasArraySlow(OutputStream &) const { return false; }
+ virtual bool hasFunctionSlow(OutputStream &) const { return false; }
+
+ // Dig through "glue" nodes like ParameterPack and ForwardTemplateReference to
+ // get at a node that actually represents some concrete syntax.
+ virtual const Node *getSyntaxNode(OutputStream &) const {
+ return this;
+ }
+
+ void print(OutputStream &S) const {
+ printLeft(S);
+ if (RHSComponentCache != Cache::No)
+ printRight(S);
+ }
+
+ // Print the "left" side of this Node into OutputStream.
+ virtual void printLeft(OutputStream &) const = 0;
+
+ // Print the "right". This distinction is necessary to represent C++ types
+ // that appear on the RHS of their subtype, such as arrays or functions.
+ // Since most types don't have such a component, provide a default
+ // implementation.
+ virtual void printRight(OutputStream &) const {}
+
+ virtual StringView getBaseName() const { return StringView(); }
+
+ // Silence compiler warnings, this dtor will never be called.
+ virtual ~Node() = default;
+
+#ifndef NDEBUG
+ LLVM_DUMP_METHOD void dump() const;
+#endif
+};
+
+class NodeArray {
+ Node **Elements;
+ size_t NumElements;
+
+public:
+ NodeArray() : Elements(nullptr), NumElements(0) {}
+ NodeArray(Node **Elements_, size_t NumElements_)
+ : Elements(Elements_), NumElements(NumElements_) {}
+
+ bool empty() const { return NumElements == 0; }
+ size_t size() const { return NumElements; }
+
+ Node **begin() const { return Elements; }
+ Node **end() const { return Elements + NumElements; }
+
+ Node *operator[](size_t Idx) const { return Elements[Idx]; }
+
+ void printWithComma(OutputStream &S) const {
+ bool FirstElement = true;
+ for (size_t Idx = 0; Idx != NumElements; ++Idx) {
+ size_t BeforeComma = S.getCurrentPosition();
+ if (!FirstElement)
+ S += ", ";
+ size_t AfterComma = S.getCurrentPosition();
+ Elements[Idx]->print(S);
+
+ // Elements[Idx] is an empty parameter pack expansion, we should erase the
+ // comma we just printed.
+ if (AfterComma == S.getCurrentPosition()) {
+ S.setCurrentPosition(BeforeComma);
+ continue;
+ }
+
+ FirstElement = false;
+ }
+ }
+};
+
+struct NodeArrayNode : Node {
+ NodeArray Array;
+ NodeArrayNode(NodeArray Array_) : Node(KNodeArrayNode), Array(Array_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Array); }
+
+ void printLeft(OutputStream &S) const override {
+ Array.printWithComma(S);
+ }
+};
+
+class DotSuffix final : public Node {
+ const Node *Prefix;
+ const StringView Suffix;
+
+public:
+ DotSuffix(const Node *Prefix_, StringView Suffix_)
+ : Node(KDotSuffix), Prefix(Prefix_), Suffix(Suffix_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Prefix, Suffix); }
+
+ void printLeft(OutputStream &s) const override {
+ Prefix->print(s);
+ s += " (";
+ s += Suffix;
+ s += ")";
+ }
+};
+
+class VendorExtQualType final : public Node {
+ const Node *Ty;
+ StringView Ext;
+
+public:
+ VendorExtQualType(const Node *Ty_, StringView Ext_)
+ : Node(KVendorExtQualType), Ty(Ty_), Ext(Ext_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Ty, Ext); }
+
+ void printLeft(OutputStream &S) const override {
+ Ty->print(S);
+ S += " ";
+ S += Ext;
+ }
+};
+
+enum FunctionRefQual : unsigned char {
+ FrefQualNone,
+ FrefQualLValue,
+ FrefQualRValue,
+};
+
+enum Qualifiers {
+ QualNone = 0,
+ QualConst = 0x1,
+ QualVolatile = 0x2,
+ QualRestrict = 0x4,
+};
+
+inline Qualifiers operator|=(Qualifiers &Q1, Qualifiers Q2) {
+ return Q1 = static_cast<Qualifiers>(Q1 | Q2);
+}
+
+class QualType : public Node {
+protected:
+ const Qualifiers Quals;
+ const Node *Child;
+
+ void printQuals(OutputStream &S) const {
+ if (Quals & QualConst)
+ S += " const";
+ if (Quals & QualVolatile)
+ S += " volatile";
+ if (Quals & QualRestrict)
+ S += " restrict";
+ }
+
+public:
+ QualType(const Node *Child_, Qualifiers Quals_)
+ : Node(KQualType, Child_->RHSComponentCache,
+ Child_->ArrayCache, Child_->FunctionCache),
+ Quals(Quals_), Child(Child_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Child, Quals); }
+
+ bool hasRHSComponentSlow(OutputStream &S) const override {
+ return Child->hasRHSComponent(S);
+ }
+ bool hasArraySlow(OutputStream &S) const override {
+ return Child->hasArray(S);
+ }
+ bool hasFunctionSlow(OutputStream &S) const override {
+ return Child->hasFunction(S);
+ }
+
+ void printLeft(OutputStream &S) const override {
+ Child->printLeft(S);
+ printQuals(S);
+ }
+
+ void printRight(OutputStream &S) const override { Child->printRight(S); }
+};
+
+class ConversionOperatorType final : public Node {
+ const Node *Ty;
+
+public:
+ ConversionOperatorType(const Node *Ty_)
+ : Node(KConversionOperatorType), Ty(Ty_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Ty); }
+
+ void printLeft(OutputStream &S) const override {
+ S += "operator ";
+ Ty->print(S);
+ }
+};
+
+class PostfixQualifiedType final : public Node {
+ const Node *Ty;
+ const StringView Postfix;
+
+public:
+ PostfixQualifiedType(Node *Ty_, StringView Postfix_)
+ : Node(KPostfixQualifiedType), Ty(Ty_), Postfix(Postfix_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Ty, Postfix); }
+
+ void printLeft(OutputStream &s) const override {
+ Ty->printLeft(s);
+ s += Postfix;
+ }
+};
+
+class NameType final : public Node {
+ const StringView Name;
+
+public:
+ NameType(StringView Name_) : Node(KNameType), Name(Name_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Name); }
+
+ StringView getName() const { return Name; }
+ StringView getBaseName() const override { return Name; }
+
+ void printLeft(OutputStream &s) const override { s += Name; }
+};
+
+class ElaboratedTypeSpefType : public Node {
+ StringView Kind;
+ Node *Child;
+public:
+ ElaboratedTypeSpefType(StringView Kind_, Node *Child_)
+ : Node(KElaboratedTypeSpefType), Kind(Kind_), Child(Child_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Kind, Child); }
+
+ void printLeft(OutputStream &S) const override {
+ S += Kind;
+ S += ' ';
+ Child->print(S);
+ }
+};
+
+struct AbiTagAttr : Node {
+ Node *Base;
+ StringView Tag;
+
+ AbiTagAttr(Node* Base_, StringView Tag_)
+ : Node(KAbiTagAttr, Base_->RHSComponentCache,
+ Base_->ArrayCache, Base_->FunctionCache),
+ Base(Base_), Tag(Tag_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Base, Tag); }
+
+ void printLeft(OutputStream &S) const override {
+ Base->printLeft(S);
+ S += "[abi:";
+ S += Tag;
+ S += "]";
+ }
+};
+
+class EnableIfAttr : public Node {
+ NodeArray Conditions;
+public:
+ EnableIfAttr(NodeArray Conditions_)
+ : Node(KEnableIfAttr), Conditions(Conditions_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Conditions); }
+
+ void printLeft(OutputStream &S) const override {
+ S += " [enable_if:";
+ Conditions.printWithComma(S);
+ S += ']';
+ }
+};
+
+class ObjCProtoName : public Node {
+ const Node *Ty;
+ StringView Protocol;
+
+ friend class PointerType;
+
+public:
+ ObjCProtoName(const Node *Ty_, StringView Protocol_)
+ : Node(KObjCProtoName), Ty(Ty_), Protocol(Protocol_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Ty, Protocol); }
+
+ bool isObjCObject() const {
+ return Ty->getKind() == KNameType &&
+ static_cast<const NameType *>(Ty)->getName() == "objc_object";
+ }
+
+ void printLeft(OutputStream &S) const override {
+ Ty->print(S);
+ S += "<";
+ S += Protocol;
+ S += ">";
+ }
+};
+
+class PointerType final : public Node {
+ const Node *Pointee;
+
+public:
+ PointerType(const Node *Pointee_)
+ : Node(KPointerType, Pointee_->RHSComponentCache),
+ Pointee(Pointee_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Pointee); }
+
+ bool hasRHSComponentSlow(OutputStream &S) const override {
+ return Pointee->hasRHSComponent(S);
+ }
+
+ void printLeft(OutputStream &s) const override {
+ // We rewrite objc_object<SomeProtocol>* into id<SomeProtocol>.
+ if (Pointee->getKind() != KObjCProtoName ||
+ !static_cast<const ObjCProtoName *>(Pointee)->isObjCObject()) {
+ Pointee->printLeft(s);
+ if (Pointee->hasArray(s))
+ s += " ";
+ if (Pointee->hasArray(s) || Pointee->hasFunction(s))
+ s += "(";
+ s += "*";
+ } else {
+ const auto *objcProto = static_cast<const ObjCProtoName *>(Pointee);
+ s += "id<";
+ s += objcProto->Protocol;
+ s += ">";
+ }
+ }
+
+ void printRight(OutputStream &s) const override {
+ if (Pointee->getKind() != KObjCProtoName ||
+ !static_cast<const ObjCProtoName *>(Pointee)->isObjCObject()) {
+ if (Pointee->hasArray(s) || Pointee->hasFunction(s))
+ s += ")";
+ Pointee->printRight(s);
+ }
+ }
+};
+
+enum class ReferenceKind {
+ LValue,
+ RValue,
+};
+
+// Represents either a LValue or an RValue reference type.
+class ReferenceType : public Node {
+ const Node *Pointee;
+ ReferenceKind RK;
+
+ mutable bool Printing = false;
+
+ // Dig through any refs to refs, collapsing the ReferenceTypes as we go. The
+ // rule here is rvalue ref to rvalue ref collapses to a rvalue ref, and any
+ // other combination collapses to a lvalue ref.
+ std::pair<ReferenceKind, const Node *> collapse(OutputStream &S) const {
+ auto SoFar = std::make_pair(RK, Pointee);
+ for (;;) {
+ const Node *SN = SoFar.second->getSyntaxNode(S);
+ if (SN->getKind() != KReferenceType)
+ break;
+ auto *RT = static_cast<const ReferenceType *>(SN);
+ SoFar.second = RT->Pointee;
+ SoFar.first = std::min(SoFar.first, RT->RK);
+ }
+ return SoFar;
+ }
+
+public:
+ ReferenceType(const Node *Pointee_, ReferenceKind RK_)
+ : Node(KReferenceType, Pointee_->RHSComponentCache),
+ Pointee(Pointee_), RK(RK_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Pointee, RK); }
+
+ bool hasRHSComponentSlow(OutputStream &S) const override {
+ return Pointee->hasRHSComponent(S);
+ }
+
+ void printLeft(OutputStream &s) const override {
+ if (Printing)
+ return;
+ SwapAndRestore<bool> SavePrinting(Printing, true);
+ std::pair<ReferenceKind, const Node *> Collapsed = collapse(s);
+ Collapsed.second->printLeft(s);
+ if (Collapsed.second->hasArray(s))
+ s += " ";
+ if (Collapsed.second->hasArray(s) || Collapsed.second->hasFunction(s))
+ s += "(";
+
+ s += (Collapsed.first == ReferenceKind::LValue ? "&" : "&&");
+ }
+ void printRight(OutputStream &s) const override {
+ if (Printing)
+ return;
+ SwapAndRestore<bool> SavePrinting(Printing, true);
+ std::pair<ReferenceKind, const Node *> Collapsed = collapse(s);
+ if (Collapsed.second->hasArray(s) || Collapsed.second->hasFunction(s))
+ s += ")";
+ Collapsed.second->printRight(s);
+ }
+};
+
+class PointerToMemberType final : public Node {
+ const Node *ClassType;
+ const Node *MemberType;
+
+public:
+ PointerToMemberType(const Node *ClassType_, const Node *MemberType_)
+ : Node(KPointerToMemberType, MemberType_->RHSComponentCache),
+ ClassType(ClassType_), MemberType(MemberType_) {}
+
+ template<typename Fn> void match(Fn F) const { F(ClassType, MemberType); }
+
+ bool hasRHSComponentSlow(OutputStream &S) const override {
+ return MemberType->hasRHSComponent(S);
+ }
+
+ void printLeft(OutputStream &s) const override {
+ MemberType->printLeft(s);
+ if (MemberType->hasArray(s) || MemberType->hasFunction(s))
+ s += "(";
+ else
+ s += " ";
+ ClassType->print(s);
+ s += "::*";
+ }
+
+ void printRight(OutputStream &s) const override {
+ if (MemberType->hasArray(s) || MemberType->hasFunction(s))
+ s += ")";
+ MemberType->printRight(s);
+ }
+};
+
+class NodeOrString {
+ const void *First;
+ const void *Second;
+
+public:
+ /* implicit */ NodeOrString(StringView Str) {
+ const char *FirstChar = Str.begin();
+ const char *SecondChar = Str.end();
+ if (SecondChar == nullptr) {
+ assert(FirstChar == SecondChar);
+ ++FirstChar, ++SecondChar;
+ }
+ First = static_cast<const void *>(FirstChar);
+ Second = static_cast<const void *>(SecondChar);
+ }
+
+ /* implicit */ NodeOrString(Node *N)
+ : First(static_cast<const void *>(N)), Second(nullptr) {}
+ NodeOrString() : First(nullptr), Second(nullptr) {}
+
+ bool isString() const { return Second && First; }
+ bool isNode() const { return First && !Second; }
+ bool isEmpty() const { return !First && !Second; }
+
+ StringView asString() const {
+ assert(isString());
+ return StringView(static_cast<const char *>(First),
+ static_cast<const char *>(Second));
+ }
+
+ const Node *asNode() const {
+ assert(isNode());
+ return static_cast<const Node *>(First);
+ }
+};
+
+class ArrayType final : public Node {
+ const Node *Base;
+ NodeOrString Dimension;
+
+public:
+ ArrayType(const Node *Base_, NodeOrString Dimension_)
+ : Node(KArrayType,
+ /*RHSComponentCache=*/Cache::Yes,
+ /*ArrayCache=*/Cache::Yes),
+ Base(Base_), Dimension(Dimension_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Base, Dimension); }
+
+ bool hasRHSComponentSlow(OutputStream &) const override { return true; }
+ bool hasArraySlow(OutputStream &) const override { return true; }
+
+ void printLeft(OutputStream &S) const override { Base->printLeft(S); }
+
+ void printRight(OutputStream &S) const override {
+ if (S.back() != ']')
+ S += " ";
+ S += "[";
+ if (Dimension.isString())
+ S += Dimension.asString();
+ else if (Dimension.isNode())
+ Dimension.asNode()->print(S);
+ S += "]";
+ Base->printRight(S);
+ }
+};
+
+class FunctionType final : public Node {
+ const Node *Ret;
+ NodeArray Params;
+ Qualifiers CVQuals;
+ FunctionRefQual RefQual;
+ const Node *ExceptionSpec;
+
+public:
+ FunctionType(const Node *Ret_, NodeArray Params_, Qualifiers CVQuals_,
+ FunctionRefQual RefQual_, const Node *ExceptionSpec_)
+ : Node(KFunctionType,
+ /*RHSComponentCache=*/Cache::Yes, /*ArrayCache=*/Cache::No,
+ /*FunctionCache=*/Cache::Yes),
+ Ret(Ret_), Params(Params_), CVQuals(CVQuals_), RefQual(RefQual_),
+ ExceptionSpec(ExceptionSpec_) {}
+
+ template<typename Fn> void match(Fn F) const {
+ F(Ret, Params, CVQuals, RefQual, ExceptionSpec);
+ }
+
+ bool hasRHSComponentSlow(OutputStream &) const override { return true; }
+ bool hasFunctionSlow(OutputStream &) const override { return true; }
+
+ // Handle C++'s ... quirky decl grammar by using the left & right
+ // distinction. Consider:
+ // int (*f(float))(char) {}
+ // f is a function that takes a float and returns a pointer to a function
+ // that takes a char and returns an int. If we're trying to print f, start
+ // by printing out the return types's left, then print our parameters, then
+ // finally print right of the return type.
+ void printLeft(OutputStream &S) const override {
+ Ret->printLeft(S);
+ S += " ";
+ }
+
+ void printRight(OutputStream &S) const override {
+ S += "(";
+ Params.printWithComma(S);
+ S += ")";
+ Ret->printRight(S);
+
+ if (CVQuals & QualConst)
+ S += " const";
+ if (CVQuals & QualVolatile)
+ S += " volatile";
+ if (CVQuals & QualRestrict)
+ S += " restrict";
+
+ if (RefQual == FrefQualLValue)
+ S += " &";
+ else if (RefQual == FrefQualRValue)
+ S += " &&";
+
+ if (ExceptionSpec != nullptr) {
+ S += ' ';
+ ExceptionSpec->print(S);
+ }
+ }
+};
+
+class NoexceptSpec : public Node {
+ const Node *E;
+public:
+ NoexceptSpec(const Node *E_) : Node(KNoexceptSpec), E(E_) {}
+
+ template<typename Fn> void match(Fn F) const { F(E); }
+
+ void printLeft(OutputStream &S) const override {
+ S += "noexcept(";
+ E->print(S);
+ S += ")";
+ }
+};
+
+class DynamicExceptionSpec : public Node {
+ NodeArray Types;
+public:
+ DynamicExceptionSpec(NodeArray Types_)
+ : Node(KDynamicExceptionSpec), Types(Types_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Types); }
+
+ void printLeft(OutputStream &S) const override {
+ S += "throw(";
+ Types.printWithComma(S);
+ S += ')';
+ }
+};
+
+class FunctionEncoding final : public Node {
+ const Node *Ret;
+ const Node *Name;
+ NodeArray Params;
+ const Node *Attrs;
+ Qualifiers CVQuals;
+ FunctionRefQual RefQual;
+
+public:
+ FunctionEncoding(const Node *Ret_, const Node *Name_, NodeArray Params_,
+ const Node *Attrs_, Qualifiers CVQuals_,
+ FunctionRefQual RefQual_)
+ : Node(KFunctionEncoding,
+ /*RHSComponentCache=*/Cache::Yes, /*ArrayCache=*/Cache::No,
+ /*FunctionCache=*/Cache::Yes),
+ Ret(Ret_), Name(Name_), Params(Params_), Attrs(Attrs_),
+ CVQuals(CVQuals_), RefQual(RefQual_) {}
+
+ template<typename Fn> void match(Fn F) const {
+ F(Ret, Name, Params, Attrs, CVQuals, RefQual);
+ }
+
+ Qualifiers getCVQuals() const { return CVQuals; }
+ FunctionRefQual getRefQual() const { return RefQual; }
+ NodeArray getParams() const { return Params; }
+ const Node *getReturnType() const { return Ret; }
+
+ bool hasRHSComponentSlow(OutputStream &) const override { return true; }
+ bool hasFunctionSlow(OutputStream &) const override { return true; }
+
+ const Node *getName() const { return Name; }
+
+ void printLeft(OutputStream &S) const override {
+ if (Ret) {
+ Ret->printLeft(S);
+ if (!Ret->hasRHSComponent(S))
+ S += " ";
+ }
+ Name->print(S);
+ }
+
+ void printRight(OutputStream &S) const override {
+ S += "(";
+ Params.printWithComma(S);
+ S += ")";
+ if (Ret)
+ Ret->printRight(S);
+
+ if (CVQuals & QualConst)
+ S += " const";
+ if (CVQuals & QualVolatile)
+ S += " volatile";
+ if (CVQuals & QualRestrict)
+ S += " restrict";
+
+ if (RefQual == FrefQualLValue)
+ S += " &";
+ else if (RefQual == FrefQualRValue)
+ S += " &&";
+
+ if (Attrs != nullptr)
+ Attrs->print(S);
+ }
+};
+
+class LiteralOperator : public Node {
+ const Node *OpName;
+
+public:
+ LiteralOperator(const Node *OpName_)
+ : Node(KLiteralOperator), OpName(OpName_) {}
+
+ template<typename Fn> void match(Fn F) const { F(OpName); }
+
+ void printLeft(OutputStream &S) const override {
+ S += "operator\"\" ";
+ OpName->print(S);
+ }
+};
+
+class SpecialName final : public Node {
+ const StringView Special;
+ const Node *Child;
+
+public:
+ SpecialName(StringView Special_, const Node *Child_)
+ : Node(KSpecialName), Special(Special_), Child(Child_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Special, Child); }
+
+ void printLeft(OutputStream &S) const override {
+ S += Special;
+ Child->print(S);
+ }
+};
+
+class CtorVtableSpecialName final : public Node {
+ const Node *FirstType;
+ const Node *SecondType;
+
+public:
+ CtorVtableSpecialName(const Node *FirstType_, const Node *SecondType_)
+ : Node(KCtorVtableSpecialName),
+ FirstType(FirstType_), SecondType(SecondType_) {}
+
+ template<typename Fn> void match(Fn F) const { F(FirstType, SecondType); }
+
+ void printLeft(OutputStream &S) const override {
+ S += "construction vtable for ";
+ FirstType->print(S);
+ S += "-in-";
+ SecondType->print(S);
+ }
+};
+
+struct NestedName : Node {
+ Node *Qual;
+ Node *Name;
+
+ NestedName(Node *Qual_, Node *Name_)
+ : Node(KNestedName), Qual(Qual_), Name(Name_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Qual, Name); }
+
+ StringView getBaseName() const override { return Name->getBaseName(); }
+
+ void printLeft(OutputStream &S) const override {
+ Qual->print(S);
+ S += "::";
+ Name->print(S);
+ }
+};
+
+struct LocalName : Node {
+ Node *Encoding;
+ Node *Entity;
+
+ LocalName(Node *Encoding_, Node *Entity_)
+ : Node(KLocalName), Encoding(Encoding_), Entity(Entity_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Encoding, Entity); }
+
+ void printLeft(OutputStream &S) const override {
+ Encoding->print(S);
+ S += "::";
+ Entity->print(S);
+ }
+};
+
+class QualifiedName final : public Node {
+ // qualifier::name
+ const Node *Qualifier;
+ const Node *Name;
+
+public:
+ QualifiedName(const Node *Qualifier_, const Node *Name_)
+ : Node(KQualifiedName), Qualifier(Qualifier_), Name(Name_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Qualifier, Name); }
+
+ StringView getBaseName() const override { return Name->getBaseName(); }
+
+ void printLeft(OutputStream &S) const override {
+ Qualifier->print(S);
+ S += "::";
+ Name->print(S);
+ }
+};
+
+class VectorType final : public Node {
+ const Node *BaseType;
+ const NodeOrString Dimension;
+
+public:
+ VectorType(const Node *BaseType_, NodeOrString Dimension_)
+ : Node(KVectorType), BaseType(BaseType_),
+ Dimension(Dimension_) {}
+
+ template<typename Fn> void match(Fn F) const { F(BaseType, Dimension); }
+
+ void printLeft(OutputStream &S) const override {
+ BaseType->print(S);
+ S += " vector[";
+ if (Dimension.isNode())
+ Dimension.asNode()->print(S);
+ else if (Dimension.isString())
+ S += Dimension.asString();
+ S += "]";
+ }
+};
+
+class PixelVectorType final : public Node {
+ const NodeOrString Dimension;
+
+public:
+ PixelVectorType(NodeOrString Dimension_)
+ : Node(KPixelVectorType), Dimension(Dimension_) {}
+
+ template<typename Fn> void match(Fn F) const { F(Dimension); }
+
+ void printLeft(OutputStream &S) const override {
+ // FIXME: This should demangle as "vector pixel".
+ S += "pixel vector[";
+ S += Dimension.asString();
+ S += "]";
+ }
+};
+
+/// An unexpanded parameter pack (either in the expression or type context). If
+/// this AST is correct, this node will have a ParameterPackExpansion node above
+/// it.
+///
+/// This node is created when some <template-args> are found that apply to an
+/// <encoding>, and is stored in the TemplateParams table. In order for this to
+/// appear in the final AST, it has to referenced via a <template-param> (ie,
+/// T_).
+class ParameterPack final : public Node {
+ NodeArray Data;
+
+ // Setup OutputStream for a pack expansion unless we're already expanding one.
+ void initializePackExpansion(OutputStream &S) const {
+ if (S.CurrentPackMax == std::numeric_limits<unsigned>::max()) {
+ S.CurrentPackMax = static_cast<unsigned>(Data.size());
+ S.CurrentPackIndex = 0;
+ }
+ }
+
+public:
+ ParameterPack(NodeArray Data_) : Node(KParameterPack), Data(Data_) {
+ ArrayCache = FunctionCache = RHSComponentCache = Cache::Unknown;
+ if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
+ return P->ArrayCache == Cache::No;
+ }))
+ ArrayCache = Cache::No;
+ if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
+ return P->FunctionCache == Cache::No;
+ }))
+ FunctionCache = Cache::No;
+ if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
+ return P->RHSComponentCache == Cache::No;
+ }))
+ RHSComponentCache = Cache::No;
+ }
+
+ template<typename Fn> void match(Fn F) const { F(Data); }
+
+ bool hasRHSComponentSlow(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ return Idx < Data.size() && Data[Idx]->hasRHSComponent(S);
+ }
+ bool hasArraySlow(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ return Idx < Data.size() && Data[Idx]->hasArray(S);
+ }
+ bool hasFunctionSlow(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ return Idx < Data.size() && Data[Idx]->hasFunction(S);
+ }
+ const Node *getSyntaxNode(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ return Idx < Data.size() ? Data[Idx]->getSyntaxNode(S) : this;
+ }
+
+ void printLeft(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ if (Idx < Data.size())
+ Data[Idx]->printLeft(S);
+ }
+ void printRight(OutputStream &S) const override {
+ initializePackExpansion(S);
+ size_t Idx = S.CurrentPackIndex;
+ if (Idx < Data.size())
+ Data[Idx]->printRight(S);
+ }
+};
+
+/// A variadic template argument. This node represents an occurrence of
+/// J<something>E in some <template-args>. It isn't itself unexpanded, unless
+/// one of its Elements is. The parser inserts a ParameterPack into the
+/// TemplateParams table if the <template-args> this pack belongs to apply to an
+/// <encoding>.
+class TemplateArgumentPack final : public Node {
+  NodeArray Elements;
+public:
+  TemplateArgumentPack(NodeArray Elements_)
+      : Node(KTemplateArgumentPack), Elements(Elements_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Elements); }
+
+  NodeArray getElements() const { return Elements; }
+
+  // Print the elements as a comma-separated list; any enclosing delimiters
+  // are supplied by the surrounding node.
+  void printLeft(OutputStream &S) const override {
+    Elements.printWithComma(S);
+  }
+};
+
+/// A pack expansion. Below this node, there are some unexpanded ParameterPacks
+/// which each have Child->ParameterPackSize elements.
+class ParameterPackExpansion final : public Node {
+  const Node *Child;
+
+public:
+  ParameterPackExpansion(const Node *Child_)
+      : Node(KParameterPackExpansion), Child(Child_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Child); }
+
+  const Node *getChild() const { return Child; }
+
+  void printLeft(OutputStream &S) const override {
+    // Max is used as a sentinel meaning "no pack was encountered". The saved
+    // values are restored when this frame returns, so nested expansions each
+    // get their own pack-iteration state.
+    constexpr unsigned Max = std::numeric_limits<unsigned>::max();
+    SwapAndRestore<unsigned> SavePackIdx(S.CurrentPackIndex, Max);
+    SwapAndRestore<unsigned> SavePackMax(S.CurrentPackMax, Max);
+    size_t StreamPos = S.getCurrentPosition();
+
+    // Print the first element in the pack. If Child contains a ParameterPack,
+    // it will set up S.CurrentPackMax and print the first element.
+    Child->print(S);
+
+    // No ParameterPack was found in Child. This can occur if we've found a pack
+    // expansion on a <function-param>.
+    if (S.CurrentPackMax == Max) {
+      S += "...";
+      return;
+    }
+
+    // We found a ParameterPack, but it has no elements. Erase whatever we may
+    // have printed by rewinding the stream to the saved position.
+    if (S.CurrentPackMax == 0) {
+      S.setCurrentPosition(StreamPos);
+      return;
+    }
+
+    // Else, iterate through the rest of the elements in the pack.
+    for (unsigned I = 1, E = S.CurrentPackMax; I < E; ++I) {
+      S += ", ";
+      S.CurrentPackIndex = I;
+      Child->print(S);
+    }
+  }
+};
+
+/// A template argument list, printed as "<T1, T2, ...>".
+class TemplateArgs final : public Node {
+  NodeArray Params;
+
+public:
+  TemplateArgs(NodeArray Params_) : Node(KTemplateArgs), Params(Params_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Params); }
+
+  NodeArray getParams() { return Params; }
+
+  void printLeft(OutputStream &S) const override {
+    S += "<";
+    Params.printWithComma(S);
+    // Insert a space so adjacent '>' characters don't form ">>".
+    if (S.back() == '>')
+      S += " ";
+    S += ">";
+  }
+};
+
+/// A forward-reference to a template argument that was not known at the point
+/// where the template parameter name was parsed in a mangling.
+///
+/// This is created when demangling the name of a specialization of a
+/// conversion function template:
+///
+/// \code
+/// struct A {
+///   template<typename T> operator T*();
+/// };
+/// \endcode
+///
+/// When demangling a specialization of the conversion function template, we
+/// encounter the name of the template (including the \c T) before we reach
+/// the template argument list, so we cannot substitute the parameter name
+/// for the corresponding argument while parsing. Instead, we create a
+/// \c ForwardTemplateReference node that is resolved after we parse the
+/// template arguments.
+struct ForwardTemplateReference : Node {
+  size_t Index;
+  // NOTE(review): Ref is dereferenced unconditionally below; all queries
+  // assume the reference has been resolved before printing — confirm callers
+  // guarantee this.
+  Node *Ref = nullptr;
+
+  // If we're currently printing this node. It is possible (though invalid) for
+  // a forward template reference to refer to itself via a substitution. This
+  // creates a cyclic AST, which will stack overflow printing. To fix this, bail
+  // out if more than one print* function is active.
+  mutable bool Printing = false;
+
+  ForwardTemplateReference(size_t Index_)
+      : Node(KForwardTemplateReference, Cache::Unknown, Cache::Unknown,
+             Cache::Unknown),
+        Index(Index_) {}
+
+  // We don't provide a matcher for these, because the value of the node is
+  // not determined by its construction parameters, and it generally needs
+  // special handling.
+  template<typename Fn> void match(Fn F) const = delete;
+
+  bool hasRHSComponentSlow(OutputStream &S) const override {
+    if (Printing)
+      return false;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    return Ref->hasRHSComponent(S);
+  }
+  bool hasArraySlow(OutputStream &S) const override {
+    if (Printing)
+      return false;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    return Ref->hasArray(S);
+  }
+  bool hasFunctionSlow(OutputStream &S) const override {
+    if (Printing)
+      return false;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    return Ref->hasFunction(S);
+  }
+  const Node *getSyntaxNode(OutputStream &S) const override {
+    if (Printing)
+      return this;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    return Ref->getSyntaxNode(S);
+  }
+
+  void printLeft(OutputStream &S) const override {
+    if (Printing)
+      return;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    Ref->printLeft(S);
+  }
+  void printRight(OutputStream &S) const override {
+    if (Printing)
+      return;
+    SwapAndRestore<bool> SavePrinting(Printing, true);
+    Ref->printRight(S);
+  }
+};
+
+/// A name followed by its template argument list, e.g. "name<template_args>".
+struct NameWithTemplateArgs : Node {
+  // name<template_args>
+  Node *Name;
+  Node *TemplateArgs;
+
+  NameWithTemplateArgs(Node *Name_, Node *TemplateArgs_)
+      : Node(KNameWithTemplateArgs), Name(Name_), TemplateArgs(TemplateArgs_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Name, TemplateArgs); }
+
+  // The base name is the underlying name's; template args don't contribute.
+  StringView getBaseName() const override { return Name->getBaseName(); }
+
+  void printLeft(OutputStream &S) const override {
+    Name->print(S);
+    TemplateArgs->print(S);
+  }
+};
+
+/// A name qualified by the global namespace, printed as "::<child>".
+class GlobalQualifiedName final : public Node {
+  Node *Child;
+
+public:
+  GlobalQualifiedName(Node* Child_)
+      : Node(KGlobalQualifiedName), Child(Child_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Child); }
+
+  StringView getBaseName() const override { return Child->getBaseName(); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "::";
+    Child->print(S);
+  }
+};
+
+/// A name qualified by "std::", printed as "std::<child>".
+struct StdQualifiedName : Node {
+  Node *Child;
+
+  StdQualifiedName(Node *Child_) : Node(KStdQualifiedName), Child(Child_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Child); }
+
+  StringView getBaseName() const override { return Child->getBaseName(); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "std::";
+    Child->print(S);
+  }
+};
+
+/// The recognized special-substitution entities.
+enum class SpecialSubKind {
+  allocator,
+  basic_string,
+  string,
+  istream,
+  ostream,
+  iostream,
+};
+
+/// A special substitution printed in its fully expanded form, e.g.
+/// "std::basic_string<char, std::char_traits<char>, std::allocator<char> >"
+/// rather than the short "std::string" spelling.
+class ExpandedSpecialSubstitution final : public Node {
+  SpecialSubKind SSK;
+
+public:
+  ExpandedSpecialSubstitution(SpecialSubKind SSK_)
+      : Node(KExpandedSpecialSubstitution), SSK(SSK_) {}
+
+  template<typename Fn> void match(Fn F) const { F(SSK); }
+
+  StringView getBaseName() const override {
+    switch (SSK) {
+    case SpecialSubKind::allocator:
+      return StringView("allocator");
+    case SpecialSubKind::basic_string:
+      return StringView("basic_string");
+    case SpecialSubKind::string:
+      // Note: the base name of the expanded "string" kind is "basic_string".
+      return StringView("basic_string");
+    case SpecialSubKind::istream:
+      return StringView("basic_istream");
+    case SpecialSubKind::ostream:
+      return StringView("basic_ostream");
+    case SpecialSubKind::iostream:
+      return StringView("basic_iostream");
+    }
+    LLVM_BUILTIN_UNREACHABLE;
+  }
+
+  void printLeft(OutputStream &S) const override {
+    switch (SSK) {
+    case SpecialSubKind::allocator:
+      S += "std::allocator";
+      break;
+    case SpecialSubKind::basic_string:
+      S += "std::basic_string";
+      break;
+    case SpecialSubKind::string:
+      S += "std::basic_string<char, std::char_traits<char>, "
+           "std::allocator<char> >";
+      break;
+    case SpecialSubKind::istream:
+      S += "std::basic_istream<char, std::char_traits<char> >";
+      break;
+    case SpecialSubKind::ostream:
+      S += "std::basic_ostream<char, std::char_traits<char> >";
+      break;
+    case SpecialSubKind::iostream:
+      S += "std::basic_iostream<char, std::char_traits<char> >";
+      break;
+    }
+  }
+};
+
+/// A special substitution printed in its short (typedef-style) form, e.g.
+/// "std::string" rather than the expanded basic_string spelling.
+class SpecialSubstitution final : public Node {
+public:
+  SpecialSubKind SSK;
+
+  SpecialSubstitution(SpecialSubKind SSK_)
+      : Node(KSpecialSubstitution), SSK(SSK_) {}
+
+  template<typename Fn> void match(Fn F) const { F(SSK); }
+
+  StringView getBaseName() const override {
+    switch (SSK) {
+    case SpecialSubKind::allocator:
+      return StringView("allocator");
+    case SpecialSubKind::basic_string:
+      return StringView("basic_string");
+    case SpecialSubKind::string:
+      return StringView("string");
+    case SpecialSubKind::istream:
+      return StringView("istream");
+    case SpecialSubKind::ostream:
+      return StringView("ostream");
+    case SpecialSubKind::iostream:
+      return StringView("iostream");
+    }
+    LLVM_BUILTIN_UNREACHABLE;
+  }
+
+  void printLeft(OutputStream &S) const override {
+    switch (SSK) {
+    case SpecialSubKind::allocator:
+      S += "std::allocator";
+      break;
+    case SpecialSubKind::basic_string:
+      S += "std::basic_string";
+      break;
+    case SpecialSubKind::string:
+      S += "std::string";
+      break;
+    case SpecialSubKind::istream:
+      S += "std::istream";
+      break;
+    case SpecialSubKind::ostream:
+      S += "std::ostream";
+      break;
+    case SpecialSubKind::iostream:
+      S += "std::iostream";
+      break;
+    }
+  }
+};
+
+/// A constructor or destructor name, printed as the base name of the type,
+/// prefixed with "~" for destructors.
+class CtorDtorName final : public Node {
+  const Node *Basename;
+  const bool IsDtor;
+  const int Variant;  // recorded from the mangling; not used when printing
+
+public:
+  CtorDtorName(const Node *Basename_, bool IsDtor_, int Variant_)
+      : Node(KCtorDtorName), Basename(Basename_), IsDtor(IsDtor_),
+        Variant(Variant_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Basename, IsDtor, Variant); }
+
+  void printLeft(OutputStream &S) const override {
+    if (IsDtor)
+      S += "~";
+    S += Basename->getBaseName();
+  }
+};
+
+/// A destructor name, printed as "~" followed by the base node's left part.
+class DtorName : public Node {
+  const Node *Base;
+
+public:
+  DtorName(const Node *Base_) : Node(KDtorName), Base(Base_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Base); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "~";
+    Base->printLeft(S);
+  }
+};
+
+/// An unnamed type, printed as "'unnamed<Count>'".
+class UnnamedTypeName : public Node {
+  const StringView Count;
+
+public:
+  UnnamedTypeName(StringView Count_) : Node(KUnnamedTypeName), Count(Count_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Count); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "'unnamed";
+    S += Count;
+    S += "\'";
+  }
+};
+
+/// A closure (lambda) type, printed as "'lambda<Count>'(<params>)".
+class ClosureTypeName : public Node {
+  NodeArray Params;
+  StringView Count;
+
+public:
+  ClosureTypeName(NodeArray Params_, StringView Count_)
+      : Node(KClosureTypeName), Params(Params_), Count(Count_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Params, Count); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "\'lambda";
+    S += Count;
+    S += "\'(";
+    Params.printWithComma(S);
+    S += ")";
+  }
+};
+
+/// A structured binding name, printed as "[a, b, c]".
+class StructuredBindingName : public Node {
+  NodeArray Bindings;
+public:
+  StructuredBindingName(NodeArray Bindings_)
+      : Node(KStructuredBindingName), Bindings(Bindings_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Bindings); }
+
+  void printLeft(OutputStream &S) const override {
+    S += '[';
+    Bindings.printWithComma(S);
+    S += ']';
+  }
+};
+
+// -- Expression Nodes --
+
+/// A binary expression, printed fully parenthesized: "(LHS) op (RHS)".
+class BinaryExpr : public Node {
+  const Node *LHS;
+  const StringView InfixOperator;
+  const Node *RHS;
+
+public:
+  BinaryExpr(const Node *LHS_, StringView InfixOperator_, const Node *RHS_)
+      : Node(KBinaryExpr), LHS(LHS_), InfixOperator(InfixOperator_), RHS(RHS_) {
+  }
+
+  template<typename Fn> void match(Fn F) const { F(LHS, InfixOperator, RHS); }
+
+  void printLeft(OutputStream &S) const override {
+    // might be a template argument expression, then we need to disambiguate
+    // with parens: a bare '>' could be taken as closing the argument list.
+    if (InfixOperator == ">")
+      S += "(";
+
+    S += "(";
+    LHS->print(S);
+    S += ") ";
+    S += InfixOperator;
+    S += " (";
+    RHS->print(S);
+    S += ")";
+
+    if (InfixOperator == ">")
+      S += ")";
+  }
+};
+
+/// An array subscript, printed as "(Op1)[Op2]".
+class ArraySubscriptExpr : public Node {
+  const Node *Op1;
+  const Node *Op2;
+
+public:
+  ArraySubscriptExpr(const Node *Op1_, const Node *Op2_)
+      : Node(KArraySubscriptExpr), Op1(Op1_), Op2(Op2_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Op1, Op2); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "(";
+    Op1->print(S);
+    S += ")[";
+    Op2->print(S);
+    S += "]";
+  }
+};
+
+/// A postfix unary expression, printed as "(Child)Operator".
+class PostfixExpr : public Node {
+  const Node *Child;
+  const StringView Operator;
+
+public:
+  PostfixExpr(const Node *Child_, StringView Operator_)
+      : Node(KPostfixExpr), Child(Child_), Operator(Operator_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Child, Operator); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "(";
+    Child->print(S);
+    S += ")";
+    S += Operator;
+  }
+};
+
+/// A ternary conditional, printed as "(Cond) ? (Then) : (Else)".
+class ConditionalExpr : public Node {
+  const Node *Cond;
+  const Node *Then;
+  const Node *Else;
+
+public:
+  ConditionalExpr(const Node *Cond_, const Node *Then_, const Node *Else_)
+      : Node(KConditionalExpr), Cond(Cond_), Then(Then_), Else(Else_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Cond, Then, Else); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "(";
+    Cond->print(S);
+    S += ") ? (";
+    Then->print(S);
+    S += ") : (";
+    Else->print(S);
+    S += ")";
+  }
+};
+
+/// A member access; Kind holds the access spelling (e.g. "." or "->").
+class MemberExpr : public Node {
+  const Node *LHS;
+  const StringView Kind;
+  const Node *RHS;
+
+public:
+  MemberExpr(const Node *LHS_, StringView Kind_, const Node *RHS_)
+      : Node(KMemberExpr), LHS(LHS_), Kind(Kind_), RHS(RHS_) {}
+
+  template<typename Fn> void match(Fn F) const { F(LHS, Kind, RHS); }
+
+  void printLeft(OutputStream &S) const override {
+    LHS->print(S);
+    S += Kind;
+    RHS->print(S);
+  }
+};
+
+/// An expression wrapped in fixed text, printed as "Prefix Infix Postfix"
+/// (e.g. for operators spelled like function calls around an operand).
+class EnclosingExpr : public Node {
+  const StringView Prefix;
+  const Node *Infix;
+  const StringView Postfix;
+
+public:
+  EnclosingExpr(StringView Prefix_, Node *Infix_, StringView Postfix_)
+      : Node(KEnclosingExpr), Prefix(Prefix_), Infix(Infix_),
+        Postfix(Postfix_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Prefix, Infix, Postfix); }
+
+  void printLeft(OutputStream &S) const override {
+    S += Prefix;
+    Infix->print(S);
+    S += Postfix;
+  }
+};
+
+/// A named cast, printed as "cast_kind<To>(From)".
+class CastExpr : public Node {
+  // cast_kind<to>(from)
+  const StringView CastKind;
+  const Node *To;
+  const Node *From;
+
+public:
+  CastExpr(StringView CastKind_, const Node *To_, const Node *From_)
+      : Node(KCastExpr), CastKind(CastKind_), To(To_), From(From_) {}
+
+  template<typename Fn> void match(Fn F) const { F(CastKind, To, From); }
+
+  void printLeft(OutputStream &S) const override {
+    S += CastKind;
+    S += "<";
+    // Note: only the left part of To/From is printed here.
+    To->printLeft(S);
+    S += ">(";
+    From->printLeft(S);
+    S += ")";
+  }
+};
+
+/// sizeof...(pack), printed by expanding the pack inside the parens.
+class SizeofParamPackExpr : public Node {
+  const Node *Pack;
+
+public:
+  SizeofParamPackExpr(const Node *Pack_)
+      : Node(KSizeofParamPackExpr), Pack(Pack_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Pack); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "sizeof...(";
+    // Wrap the pack in a temporary expansion node so its elements print.
+    ParameterPackExpansion PPE(Pack);
+    PPE.printLeft(S);
+    S += ")";
+  }
+};
+
+/// A call expression, printed as "Callee(Args...)".
+class CallExpr : public Node {
+  const Node *Callee;
+  NodeArray Args;
+
+public:
+  CallExpr(const Node *Callee_, NodeArray Args_)
+      : Node(KCallExpr), Callee(Callee_), Args(Args_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Callee, Args); }
+
+  void printLeft(OutputStream &S) const override {
+    Callee->print(S);
+    S += "(";
+    Args.printWithComma(S);
+    S += ")";
+  }
+};
+
+/// A new-expression, printed as "[::operator ]new[[]] [(expr_list) ]type[(init_list)]".
+class NewExpr : public Node {
+  // new (expr_list) type(init_list)
+  NodeArray ExprList;
+  Node *Type;
+  NodeArray InitList;
+  bool IsGlobal; // ::operator new ?
+  bool IsArray;  // new[] ?
+public:
+  NewExpr(NodeArray ExprList_, Node *Type_, NodeArray InitList_, bool IsGlobal_,
+          bool IsArray_)
+      : Node(KNewExpr), ExprList(ExprList_), Type(Type_), InitList(InitList_),
+        IsGlobal(IsGlobal_), IsArray(IsArray_) {}
+
+  template<typename Fn> void match(Fn F) const {
+    F(ExprList, Type, InitList, IsGlobal, IsArray);
+  }
+
+  void printLeft(OutputStream &S) const override {
+    if (IsGlobal)
+      S += "::operator ";
+    S += "new";
+    if (IsArray)
+      S += "[]";
+    S += ' ';
+    // Placement arguments, if any.
+    if (!ExprList.empty()) {
+      S += "(";
+      ExprList.printWithComma(S);
+      S += ")";
+    }
+    Type->print(S);
+    // Initializer, if any.
+    if (!InitList.empty()) {
+      S += "(";
+      InitList.printWithComma(S);
+      S += ")";
+    }
+
+  }
+};
+
+/// A delete-expression, printed as "[::]delete[[] ]Op".
+class DeleteExpr : public Node {
+  Node *Op;
+  bool IsGlobal; // ::delete ?
+  bool IsArray;  // delete[] ?
+
+public:
+  DeleteExpr(Node *Op_, bool IsGlobal_, bool IsArray_)
+      : Node(KDeleteExpr), Op(Op_), IsGlobal(IsGlobal_), IsArray(IsArray_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Op, IsGlobal, IsArray); }
+
+  void printLeft(OutputStream &S) const override {
+    if (IsGlobal)
+      S += "::";
+    S += "delete";
+    if (IsArray)
+      S += "[] ";
+    Op->print(S);
+  }
+};
+
+/// A prefix unary expression, printed as "Prefix(Child)".
+class PrefixExpr : public Node {
+  StringView Prefix;
+  Node *Child;
+
+public:
+  PrefixExpr(StringView Prefix_, Node *Child_)
+      : Node(KPrefixExpr), Prefix(Prefix_), Child(Child_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Prefix, Child); }
+
+  void printLeft(OutputStream &S) const override {
+    S += Prefix;
+    S += "(";
+    Child->print(S);
+    S += ")";
+  }
+};
+
+/// A reference to a function parameter, printed as "fp<Number>".
+class FunctionParam : public Node {
+  StringView Number;
+
+public:
+  FunctionParam(StringView Number_) : Node(KFunctionParam), Number(Number_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Number); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "fp";
+    S += Number;
+  }
+};
+
+/// A conversion expression, printed as "(Type)(Expressions...)".
+class ConversionExpr : public Node {
+  const Node *Type;
+  NodeArray Expressions;
+
+public:
+  ConversionExpr(const Node *Type_, NodeArray Expressions_)
+      : Node(KConversionExpr), Type(Type_), Expressions(Expressions_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Type, Expressions); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "(";
+    Type->print(S);
+    S += ")(";
+    Expressions.printWithComma(S);
+    S += ")";
+  }
+};
+
+/// A braced init list, printed as "[Ty]{Inits...}"; Ty may be null.
+class InitListExpr : public Node {
+  const Node *Ty;    // optional; may be null
+  NodeArray Inits;
+public:
+  InitListExpr(const Node *Ty_, NodeArray Inits_)
+      : Node(KInitListExpr), Ty(Ty_), Inits(Inits_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Ty, Inits); }
+
+  void printLeft(OutputStream &S) const override {
+    if (Ty)
+      Ty->print(S);
+    S += '{';
+    Inits.printWithComma(S);
+    S += '}';
+  }
+};
+
+/// A designated initializer: "[Elem] = Init" for arrays, ".Elem = Init"
+/// otherwise. The " = " is suppressed when Init is itself a braced designator.
+class BracedExpr : public Node {
+  const Node *Elem;
+  const Node *Init;
+  bool IsArray;  // array designator ([i]) vs. member designator (.m)
+public:
+  BracedExpr(const Node *Elem_, const Node *Init_, bool IsArray_)
+      : Node(KBracedExpr), Elem(Elem_), Init(Init_), IsArray(IsArray_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Elem, Init, IsArray); }
+
+  void printLeft(OutputStream &S) const override {
+    if (IsArray) {
+      S += '[';
+      Elem->print(S);
+      S += ']';
+    } else {
+      S += '.';
+      Elem->print(S);
+    }
+    if (Init->getKind() != KBracedExpr && Init->getKind() != KBracedRangeExpr)
+      S += " = ";
+    Init->print(S);
+  }
+};
+
+/// A ranged array designator: "[First ... Last] = Init".
+class BracedRangeExpr : public Node {
+  const Node *First;
+  const Node *Last;
+  const Node *Init;
+public:
+  BracedRangeExpr(const Node *First_, const Node *Last_, const Node *Init_)
+      : Node(KBracedRangeExpr), First(First_), Last(Last_), Init(Init_) {}
+
+  template<typename Fn> void match(Fn F) const { F(First, Last, Init); }
+
+  void printLeft(OutputStream &S) const override {
+    S += '[';
+    First->print(S);
+    S += " ... ";
+    Last->print(S);
+    S += ']';
+    if (Init->getKind() != KBracedExpr && Init->getKind() != KBracedRangeExpr)
+      S += " = ";
+    Init->print(S);
+  }
+};
+
+/// A C++17 fold expression. Printed as "(init op ... op (pack))" for left
+/// folds and "((pack) op ... op init)" for right folds; the init part is
+/// omitted for unary folds (Init == nullptr).
+class FoldExpr : public Node {
+  const Node *Pack, *Init;  // Init may be null for unary folds
+  StringView OperatorName;
+  bool IsLeftFold;
+
+public:
+  FoldExpr(bool IsLeftFold_, StringView OperatorName_, const Node *Pack_,
+           const Node *Init_)
+      : Node(KFoldExpr), Pack(Pack_), Init(Init_), OperatorName(OperatorName_),
+        IsLeftFold(IsLeftFold_) {}
+
+  // Note: match order intentionally mirrors the constructor parameter order,
+  // not the member declaration order.
+  template<typename Fn> void match(Fn F) const {
+    F(IsLeftFold, OperatorName, Pack, Init);
+  }
+
+  void printLeft(OutputStream &S) const override {
+    auto PrintPack = [&] {
+      S += '(';
+      ParameterPackExpansion(Pack).print(S);
+      S += ')';
+    };
+
+    S += '(';
+
+    if (IsLeftFold) {
+      // init op ... op pack
+      if (Init != nullptr) {
+        Init->print(S);
+        S += ' ';
+        S += OperatorName;
+        S += ' ';
+      }
+      // ... op pack
+      S += "... ";
+      S += OperatorName;
+      S += ' ';
+      PrintPack();
+    } else { // !IsLeftFold
+      // pack op ...
+      PrintPack();
+      S += ' ';
+      S += OperatorName;
+      S += " ...";
+      // pack op ... op init
+      if (Init != nullptr) {
+        S += ' ';
+        S += OperatorName;
+        S += ' ';
+        Init->print(S);
+      }
+    }
+    S += ')';
+  }
+};
+
+/// A throw-expression, printed as "throw Op".
+class ThrowExpr : public Node {
+  const Node *Op;
+
+public:
+  ThrowExpr(const Node *Op_) : Node(KThrowExpr), Op(Op_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Op); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "throw ";
+    Op->print(S);
+  }
+};
+
+/// A boolean literal, printed as "true" or "false".
+class BoolExpr : public Node {
+  bool Value;
+
+public:
+  BoolExpr(bool Value_) : Node(KBoolExpr), Value(Value_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Value); }
+
+  void printLeft(OutputStream &S) const override {
+    S += Value ? StringView("true") : StringView("false");
+  }
+};
+
+/// An integer cast, printed as "(Ty)Integer".
+class IntegerCastExpr : public Node {
+  // ty(integer)
+  const Node *Ty;
+  StringView Integer;
+
+public:
+  IntegerCastExpr(const Node *Ty_, StringView Integer_)
+      : Node(KIntegerCastExpr), Ty(Ty_), Integer(Integer_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Ty, Integer); }
+
+  void printLeft(OutputStream &S) const override {
+    S += "(";
+    Ty->print(S);
+    S += ")";
+    S += Integer;
+  }
+};
+
+/// An integer literal. Short type names (<= 3 chars, e.g. "u", "l", "ul")
+/// print as suffixes; longer type names print as a cast prefix.
+class IntegerLiteral : public Node {
+  StringView Type;
+  StringView Value;  // a leading 'n' encodes a negative value
+
+public:
+  IntegerLiteral(StringView Type_, StringView Value_)
+      : Node(KIntegerLiteral), Type(Type_), Value(Value_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Type, Value); }
+
+  void printLeft(OutputStream &S) const override {
+    if (Type.size() > 3) {
+      S += "(";
+      S += Type;
+      S += ")";
+    }
+
+    // NOTE(review): Value[0] assumes a non-empty literal — presumably
+    // guaranteed by the parser; confirm at the construction sites.
+    if (Value[0] == 'n') {
+      S += "-";
+      S += Value.dropFront(1);
+    } else
+      S += Value;
+
+    if (Type.size() <= 3)
+      S += Type;
+  }
+};
+
+/// Per-type traits (mangled_size, max_demangled_size, printf spec) for
+/// decoding mangled floating-point literals; specialized elsewhere.
+template <class Float> struct FloatData;
+
+namespace float_literal_impl {
+// Map a float type to its node kind via overload resolution on a null
+// pointer of that type.
+constexpr Node::Kind getFloatLiteralKind(float *) {
+  return Node::KFloatLiteral;
+}
+constexpr Node::Kind getFloatLiteralKind(double *) {
+  return Node::KDoubleLiteral;
+}
+constexpr Node::Kind getFloatLiteralKind(long double *) {
+  return Node::KLongDoubleLiteral;
+}
+}
+
+/// A floating-point literal whose bytes are hex-encoded in the mangling.
+/// Printing decodes the hex nibbles into the raw bytes of a Float value and
+/// formats it with the type's printf spec.
+template <class Float> class FloatLiteralImpl : public Node {
+  const StringView Contents;
+
+  static constexpr Kind KindForClass =
+      float_literal_impl::getFloatLiteralKind((Float *)nullptr);
+
+public:
+  FloatLiteralImpl(StringView Contents_)
+      : Node(KindForClass), Contents(Contents_) {}
+
+  template<typename Fn> void match(Fn F) const { F(Contents); }
+
+  void printLeft(OutputStream &s) const override {
+    const char *first = Contents.begin();
+    // NOTE(review): 'last' is one past Contents.end(); the size check below
+    // (> N, then clamp to first + N) compensates, but nothing is printed when
+    // the encoded length is short — confirm this matches the parser's
+    // guarantees on Contents.
+    const char *last = Contents.end() + 1;
+
+    const size_t N = FloatData<Float>::mangled_size;
+    if (static_cast<std::size_t>(last - first) > N) {
+      last = first + N;
+      // Decode pairs of hex digits into the raw bytes of a Float.
+      union {
+        Float value;
+        char buf[sizeof(Float)];
+      };
+      const char *t = first;
+      char *e = buf;
+      for (; t != last; ++t, ++e) {
+        unsigned d1 = isdigit(*t) ? static_cast<unsigned>(*t - '0')
+                                  : static_cast<unsigned>(*t - 'a' + 10);
+        ++t;
+        unsigned d0 = isdigit(*t) ? static_cast<unsigned>(*t - '0')
+                                  : static_cast<unsigned>(*t - 'a' + 10);
+        *e = static_cast<char>((d1 << 4) + d0);
+      }
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+      // The mangling stores bytes big-endian; swap for little-endian hosts.
+      std::reverse(buf, e);
+#endif
+      char num[FloatData<Float>::max_demangled_size] = {0};
+      int n = snprintf(num, sizeof(num), FloatData<Float>::spec, value);
+      s += StringView(num, num + n);
+    }
+  }
+};
+
+using FloatLiteral = FloatLiteralImpl<float>;
+using DoubleLiteral = FloatLiteralImpl<double>;
+using LongDoubleLiteral = FloatLiteralImpl<long double>;
+
+/// Visit the node. Calls \c F(P), where \c P is the node cast to the
+/// appropriate derived class.
+template<typename Fn>
+void Node::visit(Fn F) const {
+  switch (K) {
+// Each case downcasts to the concrete node type and dispatches to F.
+#define CASE(X) case K ## X: return F(static_cast<const X*>(this));
+    FOR_EACH_NODE_KIND(CASE)
+#undef CASE
+  }
+  assert(0 && "unknown mangling node kind");
+}
+
+/// Determine the kind of a node from its type.
+template<typename NodeT> struct NodeKind;
+#define SPECIALIZATION(X) \
+  template<> struct NodeKind<X> { \
+    static constexpr Node::Kind Kind = Node::K##X; \
+    static constexpr const char *name() { return #X; } \
+  };
+FOR_EACH_NODE_KIND(SPECIALIZATION)
+#undef SPECIALIZATION
+
+#undef FOR_EACH_NODE_KIND
+
+/// A minimal small-size-optimized vector of POD elements. Stores up to N
+/// elements inline; spills to malloc'd storage beyond that. Terminates the
+/// process on allocation failure (the demangler cannot throw). Copying is
+/// disabled; moving transfers or copies depending on inline-ness.
+template <class T, size_t N>
+class PODSmallVector {
+  static_assert(std::is_pod<T>::value,
+                "T is required to be a plain old data type");
+
+  T* First;     // start of live storage (== Inline when small)
+  T* Last;      // one past the last element
+  T* Cap;       // end of allocated storage
+  T Inline[N];  // inline buffer used until capacity N is exceeded
+
+  bool isInline() const { return First == Inline; }
+
+  // Reset to an empty state using the inline buffer.
+  void clearInline() {
+    First = Inline;
+    Last = Inline;
+    Cap = Inline + N;
+  }
+
+  // Grow to NewCap elements, moving out of the inline buffer on first spill.
+  void reserve(size_t NewCap) {
+    size_t S = size();
+    if (isInline()) {
+      auto* Tmp = static_cast<T*>(std::malloc(NewCap * sizeof(T)));
+      if (Tmp == nullptr)
+        std::terminate();
+      std::copy(First, Last, Tmp);
+      First = Tmp;
+    } else {
+      First = static_cast<T*>(std::realloc(First, NewCap * sizeof(T)));
+      if (First == nullptr)
+        std::terminate();
+    }
+    Last = First + S;
+    Cap = First + NewCap;
+  }
+
+public:
+  PODSmallVector() : First(Inline), Last(First), Cap(Inline + N) {}
+
+  PODSmallVector(const PODSmallVector&) = delete;
+  PODSmallVector& operator=(const PODSmallVector&) = delete;
+
+  PODSmallVector(PODSmallVector&& Other) : PODSmallVector() {
+    // Inline contents must be copied; heap storage can be stolen.
+    if (Other.isInline()) {
+      std::copy(Other.begin(), Other.end(), First);
+      Last = First + Other.size();
+      Other.clear();
+      return;
+    }
+
+    First = Other.First;
+    Last = Other.Last;
+    Cap = Other.Cap;
+    Other.clearInline();
+  }
+
+  PODSmallVector& operator=(PODSmallVector&& Other) {
+    // Other is inline: free our heap storage (if any) and copy.
+    if (Other.isInline()) {
+      if (!isInline()) {
+        std::free(First);
+        clearInline();
+      }
+      std::copy(Other.begin(), Other.end(), First);
+      Last = First + Other.size();
+      Other.clear();
+      return *this;
+    }
+
+    // We are inline, Other is heap-allocated: steal its storage.
+    if (isInline()) {
+      First = Other.First;
+      Last = Other.Last;
+      Cap = Other.Cap;
+      Other.clearInline();
+      return *this;
+    }
+
+    // Both heap-allocated: swap buffers, then empty Other.
+    std::swap(First, Other.First);
+    std::swap(Last, Other.Last);
+    std::swap(Cap, Other.Cap);
+    Other.clear();
+    return *this;
+  }
+
+  void push_back(const T& Elem) {
+    // Doubling is safe: capacity is at least N, so size() is nonzero here.
+    if (Last == Cap)
+      reserve(size() * 2);
+    *Last++ = Elem;
+  }
+
+  void pop_back() {
+    assert(Last != First && "Popping empty vector!");
+    --Last;
+  }
+
+  // Truncate to Index elements; never grows.
+  void dropBack(size_t Index) {
+    assert(Index <= size() && "dropBack() can't expand!");
+    Last = First + Index;
+  }
+
+  T* begin() { return First; }
+  T* end() { return Last; }
+
+  bool empty() const { return First == Last; }
+  size_t size() const { return static_cast<size_t>(Last - First); }
+  T& back() {
+    assert(Last != First && "Calling back() on empty vector!");
+    return *(Last - 1);
+  }
+  T& operator[](size_t Index) {
+    assert(Index < size() && "Invalid access!");
+    return *(begin() + Index);
+  }
+  void clear() { Last = First; }
+
+  ~PODSmallVector() {
+    if (!isInline())
+      std::free(First);
+  }
+};
+
+/// CRTP base for the Itanium mangling parser. Derived classes may override
+/// individual parse* methods (dispatched through getDerived()); Alloc
+/// provides node construction (makeNode) and array allocation.
+template <typename Derived, typename Alloc> struct AbstractManglingParser {
+  // Current parse window: [First, Last).
+  const char *First;
+  const char *Last;
+
+  // Name stack, this is used by the parser to hold temporary names that were
+  // parsed. The parser collapses multiple names into new nodes to construct
+  // the AST. Once the parser is finished, names.size() == 1.
+  PODSmallVector<Node *, 32> Names;
+
+  // Substitution table. Itanium supports name substitutions as a means of
+  // compression. The string "S42_" refers to the 44th entry (base-36) in this
+  // table.
+  PODSmallVector<Node *, 32> Subs;
+
+  // Template parameter table. Like the above, but referenced like "T42_".
+  // This has a smaller size compared to Subs and Names because it can be
+  // stored on the stack.
+  PODSmallVector<Node *, 8> TemplateParams;
+
+  // Set of unresolved forward <template-param> references. These can occur in a
+  // conversion operator's type, and are resolved in the enclosing <encoding>.
+  PODSmallVector<ForwardTemplateReference *, 4> ForwardTemplateRefs;
+
+  bool TryToParseTemplateArgs = true;
+  bool PermitForwardTemplateReferences = false;
+  bool ParsingLambdaParams = false;
+
+  Alloc ASTAllocator;
+
+  AbstractManglingParser(const char *First_, const char *Last_)
+      : First(First_), Last(Last_) {}
+
+  Derived &getDerived() { return static_cast<Derived &>(*this); }
+
+  // Reset all parser state so the instance can parse a new mangled name.
+  // NOTE(review): ForwardTemplateRefs is not cleared here — confirm whether
+  // that is intentional (it is drained by resolveForwardTemplateRefs).
+  void reset(const char *First_, const char *Last_) {
+    First = First_;
+    Last = Last_;
+    Names.clear();
+    Subs.clear();
+    TemplateParams.clear();
+    ParsingLambdaParams = false;
+    TryToParseTemplateArgs = true;
+    PermitForwardTemplateReferences = false;
+    ASTAllocator.reset();
+  }
+
+  // Construct a node of type T via the allocator.
+  template <class T, class... Args> Node *make(Args &&... args) {
+    return ASTAllocator.template makeNode<T>(std::forward<Args>(args)...);
+  }
+
+  // Copy [begin, end) into an allocator-owned NodeArray.
+  template <class It> NodeArray makeNodeArray(It begin, It end) {
+    size_t sz = static_cast<size_t>(end - begin);
+    void *mem = ASTAllocator.allocateNodeArray(sz);
+    Node **data = new (mem) Node *[sz];
+    std::copy(begin, end, data);
+    return NodeArray(data, sz);
+  }
+
+  // Pop the names pushed since FromPosition into a NodeArray.
+  NodeArray popTrailingNodeArray(size_t FromPosition) {
+    assert(FromPosition <= Names.size());
+    NodeArray res =
+        makeNodeArray(Names.begin() + (long)FromPosition, Names.end());
+    Names.dropBack(FromPosition);
+    return res;
+  }
+
+  // Consume S if the remaining input starts with it.
+  bool consumeIf(StringView S) {
+    if (StringView(First, Last).startsWith(S)) {
+      First += S.size();
+      return true;
+    }
+    return false;
+  }
+
+  // Consume C if it is the next character.
+  bool consumeIf(char C) {
+    if (First != Last && *First == C) {
+      ++First;
+      return true;
+    }
+    return false;
+  }
+
+  // Consume and return the next character; '\0' at end of input.
+  char consume() { return First != Last ? *First++ : '\0'; }
+
+  // Peek Lookahead characters ahead without consuming; '\0' past the end.
+  char look(unsigned Lookahead = 0) {
+    if (static_cast<size_t>(Last - First) <= Lookahead)
+      return '\0';
+    return First[Lookahead];
+  }
+
+  size_t numLeft() const { return static_cast<size_t>(Last - First); }
+
+  StringView parseNumber(bool AllowNegative = false);
+  Qualifiers parseCVQualifiers();
+  bool parsePositiveInteger(size_t *Out);
+  StringView parseBareSourceName();
+
+  bool parseSeqId(size_t *Out);
+  Node *parseSubstitution();
+  Node *parseTemplateParam();
+  Node *parseTemplateArgs(bool TagTemplates = false);
+  Node *parseTemplateArg();
+
+  /// Parse the <expr> production.
+  Node *parseExpr();
+  Node *parsePrefixExpr(StringView Kind);
+  Node *parseBinaryExpr(StringView Kind);
+  Node *parseIntegerLiteral(StringView Lit);
+  Node *parseExprPrimary();
+  template <class Float> Node *parseFloatingLiteral();
+  Node *parseFunctionParam();
+  Node *parseNewExpr();
+  Node *parseConversionExpr();
+  Node *parseBracedExpr();
+  Node *parseFoldExpr();
+
+  /// Parse the <type> production.
+  Node *parseType();
+  Node *parseFunctionType();
+  Node *parseVectorType();
+  Node *parseDecltype();
+  Node *parseArrayType();
+  Node *parsePointerToMemberType();
+  Node *parseClassEnumType();
+  Node *parseQualifiedType();
+
+  Node *parseEncoding();
+  bool parseCallOffset();
+  Node *parseSpecialName();
+
+  /// Holds some extra information about a <name> that is being parsed. This
+  /// information is only pertinent if the <name> refers to an <encoding>.
+  struct NameState {
+    bool CtorDtorConversion = false;
+    bool EndsWithTemplateArgs = false;
+    Qualifiers CVQualifiers = QualNone;
+    FunctionRefQual ReferenceQualifier = FrefQualNone;
+    // Index into ForwardTemplateRefs at the time this state was created.
+    size_t ForwardTemplateRefsBegin;
+
+    NameState(AbstractManglingParser *Enclosing)
+        : ForwardTemplateRefsBegin(Enclosing->ForwardTemplateRefs.size()) {}
+  };
+
+  // Resolve the forward <template-param> refs created since State was opened
+  // against the current TemplateParams table. Returns true on failure (an
+  // index that is out of range).
+  bool resolveForwardTemplateRefs(NameState &State) {
+    size_t I = State.ForwardTemplateRefsBegin;
+    size_t E = ForwardTemplateRefs.size();
+    for (; I < E; ++I) {
+      size_t Idx = ForwardTemplateRefs[I]->Index;
+      if (Idx >= TemplateParams.size())
+        return true;
+      ForwardTemplateRefs[I]->Ref = TemplateParams[Idx];
+    }
+    ForwardTemplateRefs.dropBack(State.ForwardTemplateRefsBegin);
+    return false;
+  }
+
+  /// Parse the <name> production>
+  Node *parseName(NameState *State = nullptr);
+  Node *parseLocalName(NameState *State);
+  Node *parseOperatorName(NameState *State);
+  Node *parseUnqualifiedName(NameState *State);
+  Node *parseUnnamedTypeName(NameState *State);
+  Node *parseSourceName(NameState *State);
+  Node *parseUnscopedName(NameState *State);
+  Node *parseNestedName(NameState *State);
+  Node *parseCtorDtorName(Node *&SoFar, NameState *State);
+
+  Node *parseAbiTags(Node *N);
+
+  /// Parse the <unresolved-name> production.
+  Node *parseUnresolvedName();
+  Node *parseSimpleId();
+  Node *parseBaseUnresolvedName();
+  Node *parseUnresolvedType();
+  Node *parseDestructorName();
+
+  /// Top-level entry point into the parser.
+  Node *parse();
+};
+
+const char* parse_discriminator(const char* first, const char* last);
+
+// <name> ::= <nested-name> // N
+// ::= <local-name> # See Scope Encoding below // Z
+// ::= <unscoped-template-name> <template-args>
+// ::= <unscoped-name>
+//
+// <unscoped-template-name> ::= <unscoped-name>
+// ::= <substitution>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseName(NameState *State) {
+  consumeIf('L'); // extension
+
+  if (look() == 'N')
+    return getDerived().parseNestedName(State);
+  if (look() == 'Z')
+    return getDerived().parseLocalName(State);
+
+  // ::= <unscoped-template-name> <template-args>
+  if (look() == 'S' && look(1) != 't') {
+    Node *S = getDerived().parseSubstitution();
+    if (S == nullptr)
+      return nullptr;
+    // A bare <substitution> is not a valid <name> on its own; it must be
+    // followed by <template-args> here.
+    if (look() != 'I')
+      return nullptr;
+    Node *TA = getDerived().parseTemplateArgs(State != nullptr);
+    if (TA == nullptr)
+      return nullptr;
+    if (State) State->EndsWithTemplateArgs = true;
+    return make<NameWithTemplateArgs>(S, TA);
+  }
+
+  Node *N = getDerived().parseUnscopedName(State);
+  if (N == nullptr)
+    return nullptr;
+  // ::= <unscoped-template-name> <template-args>
+  if (look() == 'I') {
+    // The <unscoped-template-name> is a substitution candidate; record it
+    // before parsing the argument list.
+    Subs.push_back(N);
+    Node *TA = getDerived().parseTemplateArgs(State != nullptr);
+    if (TA == nullptr)
+      return nullptr;
+    if (State) State->EndsWithTemplateArgs = true;
+    return make<NameWithTemplateArgs>(N, TA);
+  }
+  // ::= <unscoped-name>
+  return N;
+}
+
+// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+// := Z <function encoding> E s [<discriminator>]
+// := Z <function encoding> Ed [ <parameter number> ] _ <entity name>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseLocalName(NameState *State) {
+  if (!consumeIf('Z'))
+    return nullptr;
+  Node *Encoding = getDerived().parseEncoding();
+  if (Encoding == nullptr || !consumeIf('E'))
+    return nullptr;
+
+  // := Z <function encoding> E s [<discriminator>]
+  // A string literal local to the enclosing function.
+  if (consumeIf('s')) {
+    First = parse_discriminator(First, Last);
+    auto *StringLitName = make<NameType>("string literal");
+    if (!StringLitName)
+      return nullptr;
+    return make<LocalName>(Encoding, StringLitName);
+  }
+
+  // := Z <function encoding> Ed [ <parameter number> ] _ <entity name>
+  // An entity within a default argument; the parameter number is consumed
+  // but not represented in the output.
+  if (consumeIf('d')) {
+    parseNumber(true);
+    if (!consumeIf('_'))
+      return nullptr;
+    Node *N = getDerived().parseName(State);
+    if (N == nullptr)
+      return nullptr;
+    return make<LocalName>(Encoding, N);
+  }
+
+  Node *Entity = getDerived().parseName(State);
+  if (Entity == nullptr)
+    return nullptr;
+  First = parse_discriminator(First, Last);
+  return make<LocalName>(Encoding, Entity);
+}
+
+// <unscoped-name> ::= <unqualified-name>
+// ::= St <unqualified-name> # ::std::
+// extension ::= StL<unqualified-name>
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseUnscopedName(NameState *State) {
+  // "St" qualifies the following name with ::std::; "StL" is an accepted
+  // extension treated identically.
+  if (consumeIf("StL") || consumeIf("St")) {
+    Node *R = getDerived().parseUnqualifiedName(State);
+    if (R == nullptr)
+      return nullptr;
+    return make<StdQualifiedName>(R);
+  }
+  return getDerived().parseUnqualifiedName(State);
+}
+
+// <unqualified-name> ::= <operator-name> [abi-tags]
+// ::= <ctor-dtor-name>
+// ::= <source-name>
+// ::= <unnamed-type-name>
+// ::= DC <source-name>+ E # structured binding declaration
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseUnqualifiedName(NameState *State) {
+  // <ctor-dtor-name>s are special-cased in parseNestedName().
+  Node *Result;
+  if (look() == 'U')
+    Result = getDerived().parseUnnamedTypeName(State);
+  else if (look() >= '1' && look() <= '9')
+    Result = getDerived().parseSourceName(State);
+  else if (consumeIf("DC")) {
+    // DC <source-name>+ E # structured binding declaration
+    size_t BindingsBegin = Names.size();
+    do {
+      Node *Binding = getDerived().parseSourceName(State);
+      if (Binding == nullptr)
+        return nullptr;
+      Names.push_back(Binding);
+    } while (!consumeIf('E'));
+    Result = make<StructuredBindingName>(popTrailingNodeArray(BindingsBegin));
+  } else
+    Result = getDerived().parseOperatorName(State);
+  // Any of the alternatives may carry trailing <abi-tags>.
+  if (Result != nullptr)
+    Result = getDerived().parseAbiTags(Result);
+  return Result;
+}
+
+// <unnamed-type-name> ::= Ut [<nonnegative number>] _
+// ::= <closure-type-name>
+//
+// <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
+//
+// <lambda-sig> ::= <parameter type>+ # Parameter types or "v" if the lambda has no parameters
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseUnnamedTypeName(NameState *) {
+  // Ut [<nonnegative number>] _ : an unnamed type (e.g. anonymous union).
+  if (consumeIf("Ut")) {
+    StringView Count = parseNumber();
+    if (!consumeIf('_'))
+      return nullptr;
+    return make<UnnamedTypeName>(Count);
+  }
+  // Ul <lambda-sig> E [<nonnegative number>] _ : a closure type.
+  if (consumeIf("Ul")) {
+    NodeArray Params;
+    SwapAndRestore<bool> SwapParams(ParsingLambdaParams, true);
+    // "vE" encodes an empty parameter list; otherwise parse <type>+ until 'E'.
+    if (!consumeIf("vE")) {
+      size_t ParamsBegin = Names.size();
+      do {
+        Node *P = getDerived().parseType();
+        if (P == nullptr)
+          return nullptr;
+        Names.push_back(P);
+      } while (!consumeIf('E'));
+      Params = popTrailingNodeArray(ParamsBegin);
+    }
+    StringView Count = parseNumber();
+    if (!consumeIf('_'))
+      return nullptr;
+    return make<ClosureTypeName>(Params, Count);
+  }
+  return nullptr;
+}
+
+// <source-name> ::= <positive length number> <identifier>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseSourceName(NameState *) {
+  size_t Length = 0;
+  if (parsePositiveInteger(&Length))
+    return nullptr;
+  // Reject zero-length names and lengths running past the end of the input.
+  if (numLeft() < Length || Length == 0)
+    return nullptr;
+  StringView Name(First, First + Length);
+  First += Length;
+  // Identifiers beginning "_GLOBAL__N" mangle an anonymous namespace.
+  if (Name.startsWith("_GLOBAL__N"))
+    return make<NameType>("(anonymous namespace)");
+  return make<NameType>(Name);
+}
+
+// <operator-name> ::= aa # &&
+// ::= ad # & (unary)
+// ::= an # &
+// ::= aN # &=
+// ::= aS # =
+// ::= cl # ()
+// ::= cm # ,
+// ::= co # ~
+// ::= cv <type> # (cast)
+// ::= da # delete[]
+// ::= de # * (unary)
+// ::= dl # delete
+// ::= dv # /
+// ::= dV # /=
+// ::= eo # ^
+// ::= eO # ^=
+// ::= eq # ==
+// ::= ge # >=
+// ::= gt # >
+// ::= ix # []
+// ::= le # <=
+// ::= li <source-name> # operator ""
+// ::= ls # <<
+// ::= lS # <<=
+// ::= lt # <
+// ::= mi # -
+// ::= mI # -=
+// ::= ml # *
+// ::= mL # *=
+// ::= mm # -- (postfix in <expression> context)
+// ::= na # new[]
+// ::= ne # !=
+// ::= ng # - (unary)
+// ::= nt # !
+// ::= nw # new
+// ::= oo # ||
+// ::= or # |
+// ::= oR # |=
+// ::= pm # ->*
+// ::= pl # +
+// ::= pL # +=
+// ::= pp # ++ (postfix in <expression> context)
+// ::= ps # + (unary)
+// ::= pt # ->
+// ::= qu # ?
+// ::= rm # %
+// ::= rM # %=
+// ::= rs # >>
+// ::= rS # >>=
+// ::= ss # <=> C++2a
+// ::= v <digit> <source-name> # vendor extended operator
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseOperatorName(NameState *State) {
+  // Dispatch on the two-character operator code; see the grammar above.
+  // Unrecognized codes yield nullptr without consuming input.
+  switch (look()) {
+  case 'a':
+    switch (look(1)) {
+    case 'a':
+      First += 2;
+      return make<NameType>("operator&&");
+    case 'd':
+    case 'n':
+      First += 2;
+      return make<NameType>("operator&");
+    case 'N':
+      First += 2;
+      return make<NameType>("operator&=");
+    case 'S':
+      First += 2;
+      return make<NameType>("operator=");
+    }
+    return nullptr;
+  case 'c':
+    switch (look(1)) {
+    case 'l':
+      First += 2;
+      return make<NameType>("operator()");
+    case 'm':
+      First += 2;
+      return make<NameType>("operator,");
+    case 'o':
+      First += 2;
+      return make<NameType>("operator~");
+    // ::= cv <type>    # (cast)
+    case 'v': {
+      First += 2;
+      SwapAndRestore<bool> SaveTemplate(TryToParseTemplateArgs, false);
+      // If we're parsing an encoding, State != nullptr and the conversion
+      // operators' <type> could have a <template-param> that refers to some
+      // <template-arg>s further ahead in the mangled name.
+      SwapAndRestore<bool> SavePermit(PermitForwardTemplateReferences,
+                                      PermitForwardTemplateReferences ||
+                                          State != nullptr);
+      Node *Ty = getDerived().parseType();
+      if (Ty == nullptr)
+        return nullptr;
+      if (State) State->CtorDtorConversion = true;
+      return make<ConversionOperatorType>(Ty);
+    }
+    }
+    return nullptr;
+  case 'd':
+    switch (look(1)) {
+    case 'a':
+      First += 2;
+      return make<NameType>("operator delete[]");
+    case 'e':
+      First += 2;
+      return make<NameType>("operator*");
+    case 'l':
+      First += 2;
+      return make<NameType>("operator delete");
+    case 'v':
+      First += 2;
+      return make<NameType>("operator/");
+    case 'V':
+      First += 2;
+      return make<NameType>("operator/=");
+    }
+    return nullptr;
+  case 'e':
+    switch (look(1)) {
+    case 'o':
+      First += 2;
+      return make<NameType>("operator^");
+    case 'O':
+      First += 2;
+      return make<NameType>("operator^=");
+    case 'q':
+      First += 2;
+      return make<NameType>("operator==");
+    }
+    return nullptr;
+  case 'g':
+    switch (look(1)) {
+    case 'e':
+      First += 2;
+      return make<NameType>("operator>=");
+    case 't':
+      First += 2;
+      return make<NameType>("operator>");
+    }
+    return nullptr;
+  case 'i':
+    if (look(1) == 'x') {
+      First += 2;
+      return make<NameType>("operator[]");
+    }
+    return nullptr;
+  case 'l':
+    switch (look(1)) {
+    case 'e':
+      First += 2;
+      return make<NameType>("operator<=");
+    // ::= li <source-name>        # operator ""
+    case 'i': {
+      First += 2;
+      Node *SN = getDerived().parseSourceName(State);
+      if (SN == nullptr)
+        return nullptr;
+      return make<LiteralOperator>(SN);
+    }
+    case 's':
+      First += 2;
+      return make<NameType>("operator<<");
+    case 'S':
+      First += 2;
+      return make<NameType>("operator<<=");
+    case 't':
+      First += 2;
+      return make<NameType>("operator<");
+    }
+    return nullptr;
+  case 'm':
+    switch (look(1)) {
+    case 'i':
+      First += 2;
+      return make<NameType>("operator-");
+    case 'I':
+      First += 2;
+      return make<NameType>("operator-=");
+    case 'l':
+      First += 2;
+      return make<NameType>("operator*");
+    case 'L':
+      First += 2;
+      return make<NameType>("operator*=");
+    case 'm':
+      First += 2;
+      return make<NameType>("operator--");
+    }
+    return nullptr;
+  case 'n':
+    switch (look(1)) {
+    case 'a':
+      First += 2;
+      return make<NameType>("operator new[]");
+    case 'e':
+      First += 2;
+      return make<NameType>("operator!=");
+    case 'g':
+      First += 2;
+      return make<NameType>("operator-");
+    case 't':
+      First += 2;
+      return make<NameType>("operator!");
+    case 'w':
+      First += 2;
+      return make<NameType>("operator new");
+    }
+    return nullptr;
+  case 'o':
+    switch (look(1)) {
+    case 'o':
+      First += 2;
+      return make<NameType>("operator||");
+    case 'r':
+      First += 2;
+      return make<NameType>("operator|");
+    case 'R':
+      First += 2;
+      return make<NameType>("operator|=");
+    }
+    return nullptr;
+  case 'p':
+    switch (look(1)) {
+    case 'm':
+      First += 2;
+      return make<NameType>("operator->*");
+    case 'l':
+      First += 2;
+      return make<NameType>("operator+");
+    case 'L':
+      First += 2;
+      return make<NameType>("operator+=");
+    case 'p':
+      First += 2;
+      return make<NameType>("operator++");
+    case 's':
+      First += 2;
+      return make<NameType>("operator+");
+    case 't':
+      First += 2;
+      return make<NameType>("operator->");
+    }
+    return nullptr;
+  case 'q':
+    if (look(1) == 'u') {
+      First += 2;
+      return make<NameType>("operator?");
+    }
+    return nullptr;
+  case 'r':
+    switch (look(1)) {
+    case 'm':
+      First += 2;
+      return make<NameType>("operator%");
+    case 'M':
+      First += 2;
+      return make<NameType>("operator%=");
+    case 's':
+      First += 2;
+      return make<NameType>("operator>>");
+    case 'S':
+      First += 2;
+      return make<NameType>("operator>>=");
+    }
+    return nullptr;
+  case 's':
+    if (look(1) == 's') {
+      First += 2;
+      return make<NameType>("operator<=>");
+    }
+    return nullptr;
+  // ::= v <digit> <source-name>        # vendor extended operator
+  case 'v':
+    if (std::isdigit(look(1))) {
+      First += 2;
+      // NOTE(review): the vendor operator's source name is wrapped in
+      // ConversionOperatorType here rather than a dedicated node — confirm
+      // this is the intended representation.
+      Node *SN = getDerived().parseSourceName(State);
+      if (SN == nullptr)
+        return nullptr;
+      return make<ConversionOperatorType>(SN);
+    }
+    return nullptr;
+  }
+  return nullptr;
+}
+
+// <ctor-dtor-name> ::= C1 # complete object constructor
+// ::= C2 # base object constructor
+// ::= C3 # complete object allocating constructor
+// extension ::= C5 # ?
+// ::= D0 # deleting destructor
+// ::= D1 # complete object destructor
+// ::= D2 # base object destructor
+// extension ::= D5 # ?
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseCtorDtorName(Node *&SoFar,
+                                                          NameState *State) {
+  // A ctor/dtor of one of the special substitutions (std::string and the
+  // stream typedefs) is printed using the expanded substitution name, so
+  // replace SoFar with its expanded form first.
+  if (SoFar->getKind() == Node::KSpecialSubstitution) {
+    auto SSK = static_cast<SpecialSubstitution *>(SoFar)->SSK;
+    switch (SSK) {
+    case SpecialSubKind::string:
+    case SpecialSubKind::istream:
+    case SpecialSubKind::ostream:
+    case SpecialSubKind::iostream:
+      SoFar = make<ExpandedSpecialSubstitution>(SSK);
+      if (!SoFar)
+        return nullptr;
+      break;
+    default:
+      break;
+    }
+  }
+
+  if (consumeIf('C')) {
+    // "CI" marks an inheriting constructor; the base-class <name> that
+    // follows the variant digit is parsed for validity but discarded.
+    bool IsInherited = consumeIf('I');
+    if (look() != '1' && look() != '2' && look() != '3' && look() != '5')
+      return nullptr;
+    int Variant = look() - '0';
+    ++First;
+    if (State) State->CtorDtorConversion = true;
+    if (IsInherited) {
+      if (getDerived().parseName(State) == nullptr)
+        return nullptr;
+    }
+    return make<CtorDtorName>(SoFar, false, Variant);
+  }
+
+  if (look() == 'D' &&
+      (look(1) == '0' || look(1) == '1' || look(1) == '2' || look(1) == '5')) {
+    int Variant = look(1) - '0';
+    First += 2;
+    if (State) State->CtorDtorConversion = true;
+    return make<CtorDtorName>(SoFar, true, Variant);
+  }
+
+  return nullptr;
+}
+
+// <nested-name> ::= N [<CV-Qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
+// ::= N [<CV-Qualifiers>] [<ref-qualifier>] <template-prefix> <template-args> E
+//
+// <prefix> ::= <prefix> <unqualified-name>
+// ::= <template-prefix> <template-args>
+// ::= <template-param>
+// ::= <decltype>
+// ::= # empty
+// ::= <substitution>
+// ::= <prefix> <data-member-prefix>
+// extension ::= L
+//
+// <data-member-prefix> := <member source-name> [<template-args>] M
+//
+// <template-prefix> ::= <prefix> <template unqualified-name>
+// ::= <template-param>
+// ::= <substitution>
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseNestedName(NameState *State) {
+  if (!consumeIf('N'))
+    return nullptr;
+
+  Qualifiers CVTmp = parseCVQualifiers();
+  if (State) State->CVQualifiers = CVTmp;
+
+  if (consumeIf('O')) {
+    if (State) State->ReferenceQualifier = FrefQualRValue;
+  } else if (consumeIf('R')) {
+    if (State) State->ReferenceQualifier = FrefQualLValue;
+  } else
+    if (State) State->ReferenceQualifier = FrefQualNone;
+
+  // Fold each parsed component onto the prefix accumulated so far and reset
+  // the template-args flag, since the component after <template-args> is no
+  // longer "ends with template args".
+  Node *SoFar = nullptr;
+  auto PushComponent = [&](Node *Comp) {
+    if (!Comp) return false;
+    if (SoFar) SoFar = make<NestedName>(SoFar, Comp);
+    else SoFar = Comp;
+    if (State) State->EndsWithTemplateArgs = false;
+    return SoFar != nullptr;
+  };
+
+  if (consumeIf("St")) {
+    SoFar = make<NameType>("std");
+    if (!SoFar)
+      return nullptr;
+  }
+
+  while (!consumeIf('E')) {
+    consumeIf('L'); // extension
+
+    // <data-member-prefix> := <member source-name> [<template-args>] M
+    if (consumeIf('M')) {
+      if (SoFar == nullptr)
+        return nullptr;
+      continue;
+    }
+
+    // ::= <template-param>
+    if (look() == 'T') {
+      if (!PushComponent(getDerived().parseTemplateParam()))
+        return nullptr;
+      Subs.push_back(SoFar);
+      continue;
+    }
+
+    // ::= <template-prefix> <template-args>
+    if (look() == 'I') {
+      Node *TA = getDerived().parseTemplateArgs(State != nullptr);
+      if (TA == nullptr || SoFar == nullptr)
+        return nullptr;
+      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
+      if (!SoFar)
+        return nullptr;
+      if (State) State->EndsWithTemplateArgs = true;
+      Subs.push_back(SoFar);
+      continue;
+    }
+
+    // ::= <decltype>
+    if (look() == 'D' && (look(1) == 't' || look(1) == 'T')) {
+      if (!PushComponent(getDerived().parseDecltype()))
+        return nullptr;
+      Subs.push_back(SoFar);
+      continue;
+    }
+
+    // ::= <substitution>
+    if (look() == 'S' && look(1) != 't') {
+      Node *S = getDerived().parseSubstitution();
+      if (!PushComponent(S))
+        return nullptr;
+      // Don't re-record a substitution that stands alone as the whole prefix.
+      if (SoFar != S)
+        Subs.push_back(S);
+      continue;
+    }
+
+    // Parse an <unqualified-name> that's actually a <ctor-dtor-name>.
+    if (look() == 'C' || (look() == 'D' && look(1) != 'C')) {
+      if (SoFar == nullptr)
+        return nullptr;
+      if (!PushComponent(getDerived().parseCtorDtorName(SoFar, State)))
+        return nullptr;
+      SoFar = getDerived().parseAbiTags(SoFar);
+      if (SoFar == nullptr)
+        return nullptr;
+      Subs.push_back(SoFar);
+      continue;
+    }
+
+    // ::= <prefix> <unqualified-name>
+    if (!PushComponent(getDerived().parseUnqualifiedName(State)))
+      return nullptr;
+    Subs.push_back(SoFar);
+  }
+
+  if (SoFar == nullptr || Subs.empty())
+    return nullptr;
+
+  // The complete <nested-name> itself is not a substitution candidate;
+  // remove the last component that was pushed (it equals SoFar).
+  Subs.pop_back();
+  return SoFar;
+}
+
+// <simple-id> ::= <source-name> [ <template-args> ]
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseSimpleId() {
+  Node *SN = getDerived().parseSourceName(/*NameState=*/nullptr);
+  if (SN == nullptr)
+    return nullptr;
+  // An optional <template-args> list may follow the <source-name>.
+  if (look() == 'I') {
+    Node *TA = getDerived().parseTemplateArgs();
+    if (TA == nullptr)
+      return nullptr;
+    return make<NameWithTemplateArgs>(SN, TA);
+  }
+  return SN;
+}
+
+// <destructor-name> ::= <unresolved-type> # e.g., ~T or ~decltype(f())
+// ::= <simple-id> # e.g., ~A<2*N>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseDestructorName() {
+  Node *Result;
+  // A leading digit begins a <simple-id>; anything else is an
+  // <unresolved-type>.
+  if (std::isdigit(look()))
+    Result = getDerived().parseSimpleId();
+  else
+    Result = getDerived().parseUnresolvedType();
+  if (Result == nullptr)
+    return nullptr;
+  return make<DtorName>(Result);
+}
+
+// <unresolved-type> ::= <template-param>
+// ::= <decltype>
+// ::= <substitution>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseUnresolvedType() {
+  // <template-param> and <decltype> results are recorded as substitution
+  // candidates; an existing <substitution> is not re-recorded.
+  if (look() == 'T') {
+    Node *TP = getDerived().parseTemplateParam();
+    if (TP == nullptr)
+      return nullptr;
+    Subs.push_back(TP);
+    return TP;
+  }
+  if (look() == 'D') {
+    Node *DT = getDerived().parseDecltype();
+    if (DT == nullptr)
+      return nullptr;
+    Subs.push_back(DT);
+    return DT;
+  }
+  return getDerived().parseSubstitution();
+}
+
+// <base-unresolved-name> ::= <simple-id> # unresolved name
+// extension ::= <operator-name> # unresolved operator-function-id
+// extension ::= <operator-name> <template-args> # unresolved operator template-id
+// ::= on <operator-name> # unresolved operator-function-id
+// ::= on <operator-name> <template-args> # unresolved operator template-id
+// ::= dn <destructor-name> # destructor or pseudo-destructor;
+// # e.g. ~X or ~X<N-1>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseBaseUnresolvedName() {
+  // A leading digit begins a <simple-id>.
+  if (std::isdigit(look()))
+    return getDerived().parseSimpleId();
+
+  if (consumeIf("dn"))
+    return getDerived().parseDestructorName();
+
+  // "on" before an <operator-name> is optional (extension accepts its
+  // absence).
+  consumeIf("on");
+
+  Node *Oper = getDerived().parseOperatorName(/*NameState=*/nullptr);
+  if (Oper == nullptr)
+    return nullptr;
+  if (look() == 'I') {
+    Node *TA = getDerived().parseTemplateArgs();
+    if (TA == nullptr)
+      return nullptr;
+    return make<NameWithTemplateArgs>(Oper, TA);
+  }
+  return Oper;
+}
+
+// <unresolved-name>
+// extension ::= srN <unresolved-type> [<template-args>] <unresolved-qualifier-level>* E <base-unresolved-name>
+// ::= [gs] <base-unresolved-name> # x or (with "gs") ::x
+// ::= [gs] sr <unresolved-qualifier-level>+ E <base-unresolved-name>
+// # A::x, N::y, A<T>::z; "gs" means leading "::"
+// ::= sr <unresolved-type> <base-unresolved-name> # T::x / decltype(p)::x
+// extension ::= sr <unresolved-type> <template-args> <base-unresolved-name>
+// # T::N::x /decltype(p)::N::x
+// (ignored) ::= srN <unresolved-type> <unresolved-qualifier-level>+ E <base-unresolved-name>
+//
+// <unresolved-qualifier-level> ::= <simple-id>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseUnresolvedName() {
+  Node *SoFar = nullptr;
+
+  // srN <unresolved-type> [<template-args>] <unresolved-qualifier-level>* E <base-unresolved-name>
+  // srN <unresolved-type> <unresolved-qualifier-level>+ E <base-unresolved-name>
+  if (consumeIf("srN")) {
+    SoFar = getDerived().parseUnresolvedType();
+    if (SoFar == nullptr)
+      return nullptr;
+
+    if (look() == 'I') {
+      Node *TA = getDerived().parseTemplateArgs();
+      if (TA == nullptr)
+        return nullptr;
+      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
+      if (!SoFar)
+        return nullptr;
+    }
+
+    // Fold each <unresolved-qualifier-level> onto the qualified name.
+    while (!consumeIf('E')) {
+      Node *Qual = getDerived().parseSimpleId();
+      if (Qual == nullptr)
+        return nullptr;
+      SoFar = make<QualifiedName>(SoFar, Qual);
+      if (!SoFar)
+        return nullptr;
+    }
+
+    Node *Base = getDerived().parseBaseUnresolvedName();
+    if (Base == nullptr)
+      return nullptr;
+    return make<QualifiedName>(SoFar, Base);
+  }
+
+  bool Global = consumeIf("gs");
+
+  // [gs] <base-unresolved-name>                     # x or (with "gs") ::x
+  if (!consumeIf("sr")) {
+    SoFar = getDerived().parseBaseUnresolvedName();
+    if (SoFar == nullptr)
+      return nullptr;
+    if (Global)
+      SoFar = make<GlobalQualifiedName>(SoFar);
+    return SoFar;
+  }
+
+  // [gs] sr <unresolved-qualifier-level>+ E <base-unresolved-name>
+  if (std::isdigit(look())) {
+    do {
+      Node *Qual = getDerived().parseSimpleId();
+      if (Qual == nullptr)
+        return nullptr;
+      if (SoFar)
+        SoFar = make<QualifiedName>(SoFar, Qual);
+      else if (Global)
+        SoFar = make<GlobalQualifiedName>(Qual);
+      else
+        SoFar = Qual;
+      if (!SoFar)
+        return nullptr;
+    } while (!consumeIf('E'));
+  }
+  // sr <unresolved-type>                 <base-unresolved-name>
+  // sr <unresolved-type> <template-args> <base-unresolved-name>
+  else {
+    SoFar = getDerived().parseUnresolvedType();
+    if (SoFar == nullptr)
+      return nullptr;
+
+    if (look() == 'I') {
+      Node *TA = getDerived().parseTemplateArgs();
+      if (TA == nullptr)
+        return nullptr;
+      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
+      if (!SoFar)
+        return nullptr;
+    }
+  }
+
+  // Both branches above either set SoFar or returned early.
+  assert(SoFar != nullptr);
+
+  Node *Base = getDerived().parseBaseUnresolvedName();
+  if (Base == nullptr)
+    return nullptr;
+  return make<QualifiedName>(SoFar, Base);
+}
+
+// <abi-tags> ::= <abi-tag> [<abi-tags>]
+// <abi-tag> ::= B <source-name>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseAbiTags(Node *N) {
+  // Wrap N in one AbiTagAttr per "B <source-name>" tag; returns N unchanged
+  // when no tags are present, or nullptr on a malformed tag.
+  while (consumeIf('B')) {
+    StringView SN = parseBareSourceName();
+    if (SN.empty())
+      return nullptr;
+    N = make<AbiTagAttr>(N, SN);
+    if (!N)
+      return nullptr;
+  }
+  return N;
+}
+
+// <number> ::= [n] <non-negative decimal integer>
+template <typename Alloc, typename Derived>
+StringView
+AbstractManglingParser<Alloc, Derived>::parseNumber(bool AllowNegative) {
+  // Returns the consumed digit run (including a leading 'n' when
+  // AllowNegative is set), or an empty StringView if no digits follow.
+  const char *Tmp = First;
+  if (AllowNegative)
+    consumeIf('n');
+  if (numLeft() == 0 || !std::isdigit(*First))
+    return StringView();
+  while (numLeft() != 0 && std::isdigit(*First))
+    ++First;
+  return StringView(Tmp, First);
+}
+
+// <positive length number> ::= [0-9]*
+template <typename Alloc, typename Derived>
+bool AbstractManglingParser<Alloc, Derived>::parsePositiveInteger(size_t *Out) {
+  // Accumulates a decimal integer into *Out; returns true on failure (no
+  // leading digit). NOTE(review): no overflow check on the accumulation.
+  *Out = 0;
+  if (look() < '0' || look() > '9')
+    return true;
+  while (look() >= '0' && look() <= '9') {
+    *Out *= 10;
+    *Out += static_cast<size_t>(consume() - '0');
+  }
+  return false;
+}
+
+template <typename Alloc, typename Derived>
+StringView AbstractManglingParser<Alloc, Derived>::parseBareSourceName() {
+  // Parses "<length> <identifier>" and returns the identifier, or an empty
+  // StringView if the length is malformed or exceeds the remaining input.
+  size_t Int = 0;
+  if (parsePositiveInteger(&Int) || numLeft() < Int)
+    return StringView();
+  StringView R(First, First + Int);
+  First += Int;
+  return R;
+}
+
+// <function-type> ::= [<CV-qualifiers>] [<exception-spec>] [Dx] F [Y] <bare-function-type> [<ref-qualifier>] E
+//
+// <exception-spec> ::= Do # non-throwing exception-specification (e.g., noexcept, throw())
+// ::= DO <expression> E # computed (instantiation-dependent) noexcept
+// ::= Dw <type>+ E # dynamic exception specification with instantiation-dependent types
+//
+// <ref-qualifier> ::= R # & ref-qualifier
+// <ref-qualifier> ::= O # && ref-qualifier
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseFunctionType() {
+  Qualifiers CVQuals = parseCVQualifiers();
+
+  // Optional <exception-spec> precedes the 'F'.
+  Node *ExceptionSpec = nullptr;
+  if (consumeIf("Do")) {
+    ExceptionSpec = make<NameType>("noexcept");
+    if (!ExceptionSpec)
+      return nullptr;
+  } else if (consumeIf("DO")) {
+    // DO <expression> E : computed noexcept.
+    Node *E = getDerived().parseExpr();
+    if (E == nullptr || !consumeIf('E'))
+      return nullptr;
+    ExceptionSpec = make<NoexceptSpec>(E);
+    if (!ExceptionSpec)
+      return nullptr;
+  } else if (consumeIf("Dw")) {
+    // Dw <type>+ E : dynamic exception specification.
+    size_t SpecsBegin = Names.size();
+    while (!consumeIf('E')) {
+      Node *T = getDerived().parseType();
+      if (T == nullptr)
+        return nullptr;
+      Names.push_back(T);
+    }
+    ExceptionSpec =
+      make<DynamicExceptionSpec>(popTrailingNodeArray(SpecsBegin));
+    if (!ExceptionSpec)
+      return nullptr;
+  }
+
+  consumeIf("Dx"); // transaction safe
+
+  if (!consumeIf('F'))
+    return nullptr;
+  consumeIf('Y'); // extern "C"
+  // The first <type> of the bare-function-type is the return type.
+  Node *ReturnType = getDerived().parseType();
+  if (ReturnType == nullptr)
+    return nullptr;
+
+  FunctionRefQual ReferenceQualifier = FrefQualNone;
+  size_t ParamsBegin = Names.size();
+  while (true) {
+    if (consumeIf('E'))
+      break;
+    if (consumeIf('v'))
+      continue;
+    // A ref-qualifier, when present, immediately precedes the closing 'E'.
+    if (consumeIf("RE")) {
+      ReferenceQualifier = FrefQualLValue;
+      break;
+    }
+    if (consumeIf("OE")) {
+      ReferenceQualifier = FrefQualRValue;
+      break;
+    }
+    Node *T = getDerived().parseType();
+    if (T == nullptr)
+      return nullptr;
+    Names.push_back(T);
+  }
+
+  NodeArray Params = popTrailingNodeArray(ParamsBegin);
+  return make<FunctionType>(ReturnType, Params, CVQuals,
+                            ReferenceQualifier, ExceptionSpec);
+}
+
+// extension:
+// <vector-type> ::= Dv <positive dimension number> _ <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseVectorType() {
+  if (!consumeIf("Dv"))
+    return nullptr;
+  // Dv <positive dimension number> _ <extended element type>
+  if (look() >= '1' && look() <= '9') {
+    StringView DimensionNumber = parseNumber();
+    if (!consumeIf('_'))
+      return nullptr;
+    // 'p' as the element type is the AltiVec vector pixel extension.
+    if (consumeIf('p'))
+      return make<PixelVectorType>(DimensionNumber);
+    Node *ElemType = getDerived().parseType();
+    if (ElemType == nullptr)
+      return nullptr;
+    return make<VectorType>(ElemType, DimensionNumber);
+  }
+
+  // Dv <dimension expression> _ <element type>
+  if (!consumeIf('_')) {
+    Node *DimExpr = getDerived().parseExpr();
+    if (!DimExpr)
+      return nullptr;
+    if (!consumeIf('_'))
+      return nullptr;
+    Node *ElemType = getDerived().parseType();
+    if (!ElemType)
+      return nullptr;
+    return make<VectorType>(ElemType, DimExpr);
+  }
+  // Dv _ <element type> : no dimension at all.
+  Node *ElemType = getDerived().parseType();
+  if (!ElemType)
+    return nullptr;
+  return make<VectorType>(ElemType, StringView());
+}
+
+// <decltype> ::= Dt <expression> E # decltype of an id-expression or class member access (C++0x)
+// ::= DT <expression> E # decltype of an expression (C++0x)
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseDecltype() {
+  if (!consumeIf('D'))
+    return nullptr;
+  // 't' and 'T' differ only in which kind of expression was decltype'd;
+  // both print identically as "decltype(<expr>)".
+  if (!consumeIf('t') && !consumeIf('T'))
+    return nullptr;
+  Node *E = getDerived().parseExpr();
+  if (E == nullptr)
+    return nullptr;
+  if (!consumeIf('E'))
+    return nullptr;
+  return make<EnclosingExpr>("decltype(", E, ")");
+}
+
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseArrayType() {
+  if (!consumeIf('A'))
+    return nullptr;
+
+  // The dimension may be a number, an expression, or absent entirely
+  // (A _ <element type>), in which case Dimension stays empty.
+  NodeOrString Dimension;
+
+  if (std::isdigit(look())) {
+    Dimension = parseNumber();
+    if (!consumeIf('_'))
+      return nullptr;
+  } else if (!consumeIf('_')) {
+    Node *DimExpr = getDerived().parseExpr();
+    if (DimExpr == nullptr)
+      return nullptr;
+    if (!consumeIf('_'))
+      return nullptr;
+    Dimension = DimExpr;
+  }
+
+  Node *Ty = getDerived().parseType();
+  if (Ty == nullptr)
+    return nullptr;
+  return make<ArrayType>(Ty, Dimension);
+}
+
+// <pointer-to-member-type> ::= M <class type> <member type>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parsePointerToMemberType() {
+  if (!consumeIf('M'))
+    return nullptr;
+  // M <class type> <member type>
+  Node *ClassType = getDerived().parseType();
+  if (ClassType == nullptr)
+    return nullptr;
+  Node *MemberType = getDerived().parseType();
+  if (MemberType == nullptr)
+    return nullptr;
+  return make<PointerToMemberType>(ClassType, MemberType);
+}
+
+// <class-enum-type> ::= <name> # non-dependent type name, dependent type name, or dependent typename-specifier
+// ::= Ts <name> # dependent elaborated type specifier using 'struct' or 'class'
+// ::= Tu <name> # dependent elaborated type specifier using 'union'
+// ::= Te <name> # dependent elaborated type specifier using 'enum'
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseClassEnumType() {
+  // Optional elaborated-type-specifier keyword preceding the <name>.
+  StringView ElabSpef;
+  if (consumeIf("Ts"))
+    ElabSpef = "struct";
+  else if (consumeIf("Tu"))
+    ElabSpef = "union";
+  else if (consumeIf("Te"))
+    ElabSpef = "enum";
+
+  Node *Name = getDerived().parseName();
+  if (Name == nullptr)
+    return nullptr;
+
+  if (!ElabSpef.empty())
+    return make<ElaboratedTypeSpefType>(ElabSpef, Name);
+
+  return Name;
+}
+
+// <qualified-type> ::= <qualifiers> <type>
+// <qualifiers> ::= <extended-qualifier>* <CV-qualifiers>
+// <extended-qualifier> ::= U <source-name> [<template-args>] # vendor extended type qualifier
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseQualifiedType() {
+  if (consumeIf('U')) {
+    StringView Qual = parseBareSourceName();
+    if (Qual.empty())
+      return nullptr;
+
+    // FIXME parse the optional <template-args> here!
+
+    // extension            ::= U <objc-name> <objc-type>  # objc-type<identifier>
+    if (Qual.startsWith("objcproto")) {
+      StringView ProtoSourceName = Qual.dropFront(std::strlen("objcproto"));
+      // Re-parse the protocol identifier embedded inside the qualifier name
+      // by temporarily pointing First/Last at that sub-range.
+      StringView Proto;
+      {
+        SwapAndRestore<const char *> SaveFirst(First, ProtoSourceName.begin()),
+                                     SaveLast(Last, ProtoSourceName.end());
+        Proto = parseBareSourceName();
+      }
+      if (Proto.empty())
+        return nullptr;
+      Node *Child = getDerived().parseQualifiedType();
+      if (Child == nullptr)
+        return nullptr;
+      return make<ObjCProtoName>(Child, Proto);
+    }
+
+    // Generic vendor extended type qualifier.
+    Node *Child = getDerived().parseQualifiedType();
+    if (Child == nullptr)
+      return nullptr;
+    return make<VendorExtQualType>(Child, Qual);
+  }
+
+  Qualifiers Quals = parseCVQualifiers();
+  Node *Ty = getDerived().parseType();
+  if (Ty == nullptr)
+    return nullptr;
+  // Only wrap in QualType when at least one CV qualifier was present.
+  if (Quals != QualNone)
+    Ty = make<QualType>(Ty, Quals);
+  return Ty;
+}
+
+// <type> ::= <builtin-type>
+// ::= <qualified-type>
+// ::= <function-type>
+// ::= <class-enum-type>
+// ::= <array-type>
+// ::= <pointer-to-member-type>
+// ::= <template-param>
+// ::= <template-template-param> <template-args>
+// ::= <decltype>
+// ::= P <type> # pointer
+// ::= R <type> # l-value reference
+// ::= O <type> # r-value reference (C++11)
+// ::= C <type> # complex pair (C99)
+// ::= G <type> # imaginary (C99)
+// ::= <substitution> # See Compression below
+// extension ::= U <objc-name> <objc-type> # objc-type<identifier>
+// extension ::= <vector-type> # <vector-type> starts with Dv
+//
+// <objc-name> ::= <k0 number> objcproto <k1 number> <identifier> # k0 = 9 + <number of digits in k1> + k1
+// <objc-type> ::= <source-name> # PU<11+>objcproto 11objc_object<source-name> 11objc_object -> id<source-name>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseType() {
+ Node *Result = nullptr;
+
+ switch (look()) {
+ // ::= <qualified-type>
+ case 'r':
+ case 'V':
+ case 'K': {
+ // Peek past any CV qualifiers without consuming them: if what follows is
+ // a function type ('F', or a 'D' exception-spec/transaction marker
+ // Do/DO/Dw/Dx), the qualifiers belong to the <function-type> production
+ // and parseFunctionType() re-reads them itself.
+ unsigned AfterQuals = 0;
+ if (look(AfterQuals) == 'r') ++AfterQuals;
+ if (look(AfterQuals) == 'V') ++AfterQuals;
+ if (look(AfterQuals) == 'K') ++AfterQuals;
+
+ if (look(AfterQuals) == 'F' ||
+ (look(AfterQuals) == 'D' &&
+ (look(AfterQuals + 1) == 'o' || look(AfterQuals + 1) == 'O' ||
+ look(AfterQuals + 1) == 'w' || look(AfterQuals + 1) == 'x'))) {
+ Result = getDerived().parseFunctionType();
+ break;
+ }
+ LLVM_FALLTHROUGH;
+ }
+ case 'U': {
+ Result = getDerived().parseQualifiedType();
+ break;
+ }
+ // <builtin-type> ::= v # void
+ case 'v':
+ ++First;
+ return make<NameType>("void");
+ // ::= w # wchar_t
+ case 'w':
+ ++First;
+ return make<NameType>("wchar_t");
+ // ::= b # bool
+ case 'b':
+ ++First;
+ return make<NameType>("bool");
+ // ::= c # char
+ case 'c':
+ ++First;
+ return make<NameType>("char");
+ // ::= a # signed char
+ case 'a':
+ ++First;
+ return make<NameType>("signed char");
+ // ::= h # unsigned char
+ case 'h':
+ ++First;
+ return make<NameType>("unsigned char");
+ // ::= s # short
+ case 's':
+ ++First;
+ return make<NameType>("short");
+ // ::= t # unsigned short
+ case 't':
+ ++First;
+ return make<NameType>("unsigned short");
+ // ::= i # int
+ case 'i':
+ ++First;
+ return make<NameType>("int");
+ // ::= j # unsigned int
+ case 'j':
+ ++First;
+ return make<NameType>("unsigned int");
+ // ::= l # long
+ case 'l':
+ ++First;
+ return make<NameType>("long");
+ // ::= m # unsigned long
+ case 'm':
+ ++First;
+ return make<NameType>("unsigned long");
+ // ::= x # long long, __int64
+ case 'x':
+ ++First;
+ return make<NameType>("long long");
+ // ::= y # unsigned long long, __int64
+ case 'y':
+ ++First;
+ return make<NameType>("unsigned long long");
+ // ::= n # __int128
+ case 'n':
+ ++First;
+ return make<NameType>("__int128");
+ // ::= o # unsigned __int128
+ case 'o':
+ ++First;
+ return make<NameType>("unsigned __int128");
+ // ::= f # float
+ case 'f':
+ ++First;
+ return make<NameType>("float");
+ // ::= d # double
+ case 'd':
+ ++First;
+ return make<NameType>("double");
+ // ::= e # long double, __float80
+ case 'e':
+ ++First;
+ return make<NameType>("long double");
+ // ::= g # __float128
+ case 'g':
+ ++First;
+ return make<NameType>("__float128");
+ // ::= z # ellipsis
+ case 'z':
+ ++First;
+ return make<NameType>("...");
+
+ // <builtin-type> ::= u <source-name> # vendor extended type
+ case 'u': {
+ ++First;
+ StringView Res = parseBareSourceName();
+ if (Res.empty())
+ return nullptr;
+ return make<NameType>(Res);
+ }
+ case 'D':
+ switch (look(1)) {
+ // ::= Dd # IEEE 754r decimal floating point (64 bits)
+ case 'd':
+ First += 2;
+ return make<NameType>("decimal64");
+ // ::= De # IEEE 754r decimal floating point (128 bits)
+ case 'e':
+ First += 2;
+ return make<NameType>("decimal128");
+ // ::= Df # IEEE 754r decimal floating point (32 bits)
+ case 'f':
+ First += 2;
+ return make<NameType>("decimal32");
+ // ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ case 'h':
+ // NOTE(review): the grammar comment says half-precision FP, yet the
+ // emitted name is "decimal16" — confirm against upstream before use.
+ First += 2;
+ return make<NameType>("decimal16");
+ // ::= Di # char32_t
+ case 'i':
+ First += 2;
+ return make<NameType>("char32_t");
+ // ::= Ds # char16_t
+ case 's':
+ First += 2;
+ return make<NameType>("char16_t");
+ // ::= Da # auto (in dependent new-expressions)
+ case 'a':
+ First += 2;
+ return make<NameType>("auto");
+ // ::= Dc # decltype(auto)
+ case 'c':
+ First += 2;
+ return make<NameType>("decltype(auto)");
+ // ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
+ case 'n':
+ First += 2;
+ return make<NameType>("std::nullptr_t");
+
+ // ::= <decltype>
+ case 't':
+ case 'T': {
+ Result = getDerived().parseDecltype();
+ break;
+ }
+ // extension ::= <vector-type> # <vector-type> starts with Dv
+ case 'v': {
+ Result = getDerived().parseVectorType();
+ break;
+ }
+ // ::= Dp <type> # pack expansion (C++0x)
+ case 'p': {
+ First += 2;
+ Node *Child = getDerived().parseType();
+ if (!Child)
+ return nullptr;
+ Result = make<ParameterPackExpansion>(Child);
+ break;
+ }
+ // Exception specifier on a function type.
+ case 'o':
+ case 'O':
+ case 'w':
+ // Transaction safe function type.
+ case 'x':
+ Result = getDerived().parseFunctionType();
+ break;
+ }
+ break;
+ // ::= <function-type>
+ case 'F': {
+ Result = getDerived().parseFunctionType();
+ break;
+ }
+ // ::= <array-type>
+ case 'A': {
+ Result = getDerived().parseArrayType();
+ break;
+ }
+ // ::= <pointer-to-member-type>
+ case 'M': {
+ Result = getDerived().parsePointerToMemberType();
+ break;
+ }
+ // ::= <template-param>
+ case 'T': {
+ // This could be an elaborate type specifier on a <class-enum-type>.
+ if (look(1) == 's' || look(1) == 'u' || look(1) == 'e') {
+ Result = getDerived().parseClassEnumType();
+ break;
+ }
+
+ Result = getDerived().parseTemplateParam();
+ if (Result == nullptr)
+ return nullptr;
+
+ // Result could be either of:
+ // <type> ::= <template-param>
+ // <type> ::= <template-template-param> <template-args>
+ //
+ // <template-template-param> ::= <template-param>
+ // ::= <substitution>
+ //
+ // If this is followed by some <template-args>, and we're permitted to
+ // parse them, take the second production.
+
+ if (TryToParseTemplateArgs && look() == 'I') {
+ Node *TA = getDerived().parseTemplateArgs();
+ if (TA == nullptr)
+ return nullptr;
+ Result = make<NameWithTemplateArgs>(Result, TA);
+ }
+ break;
+ }
+ // ::= P <type> # pointer
+ case 'P': {
+ ++First;
+ Node *Ptr = getDerived().parseType();
+ if (Ptr == nullptr)
+ return nullptr;
+ Result = make<PointerType>(Ptr);
+ break;
+ }
+ // ::= R <type> # l-value reference
+ case 'R': {
+ ++First;
+ Node *Ref = getDerived().parseType();
+ if (Ref == nullptr)
+ return nullptr;
+ Result = make<ReferenceType>(Ref, ReferenceKind::LValue);
+ break;
+ }
+ // ::= O <type> # r-value reference (C++11)
+ case 'O': {
+ ++First;
+ Node *Ref = getDerived().parseType();
+ if (Ref == nullptr)
+ return nullptr;
+ Result = make<ReferenceType>(Ref, ReferenceKind::RValue);
+ break;
+ }
+ // ::= C <type> # complex pair (C99)
+ case 'C': {
+ ++First;
+ Node *P = getDerived().parseType();
+ if (P == nullptr)
+ return nullptr;
+ Result = make<PostfixQualifiedType>(P, " complex");
+ break;
+ }
+ // ::= G <type> # imaginary (C99)
+ case 'G': {
+ ++First;
+ Node *P = getDerived().parseType();
+ if (P == nullptr)
+ return P;
+ Result = make<PostfixQualifiedType>(P, " imaginary");
+ break;
+ }
+ // ::= <substitution> # See Compression below
+ case 'S': {
+ // "St" introduces std::, which is handled by the class-enum path below;
+ // every other "S.." is a back-reference into the substitution table.
+ if (look(1) && look(1) != 't') {
+ Node *Sub = getDerived().parseSubstitution();
+ if (Sub == nullptr)
+ return nullptr;
+
+ // Sub could be either of:
+ // <type> ::= <substitution>
+ // <type> ::= <template-template-param> <template-args>
+ //
+ // <template-template-param> ::= <template-param>
+ // ::= <substitution>
+ //
+ // If this is followed by some <template-args>, and we're permitted to
+ // parse them, take the second production.
+
+ if (TryToParseTemplateArgs && look() == 'I') {
+ Node *TA = getDerived().parseTemplateArgs();
+ if (TA == nullptr)
+ return nullptr;
+ Result = make<NameWithTemplateArgs>(Sub, TA);
+ break;
+ }
+
+ // If all we parsed was a substitution, don't re-insert into the
+ // substitution table.
+ return Sub;
+ }
+ LLVM_FALLTHROUGH;
+ }
+ // ::= <class-enum-type>
+ default: {
+ Result = getDerived().parseClassEnumType();
+ break;
+ }
+ }
+
+ // If we parsed a type, insert it into the substitution table. Note that all
+ // <builtin-type>s and <substitution>s have already bailed out, because they
+ // don't get substitutions.
+ if (Result != nullptr)
+ Subs.push_back(Result);
+ return Result;
+}
+
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parsePrefixExpr(StringView Kind) {
+ // Parse the single operand, then wrap it with the operator text <Kind>.
+ Node *E = getDerived().parseExpr();
+ if (E == nullptr)
+ return nullptr;
+ return make<PrefixExpr>(Kind, E);
+}
+
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseBinaryExpr(StringView Kind) {
+ // Operands are mangled left-hand side first, then right-hand side; the two
+ // parseExpr calls are sequenced statements so that order is preserved.
+ Node *LHS = getDerived().parseExpr();
+ if (LHS == nullptr)
+ return nullptr;
+ Node *RHS = getDerived().parseExpr();
+ if (RHS == nullptr)
+ return nullptr;
+ return make<BinaryExpr>(LHS, Kind, RHS);
+}
+
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseIntegerLiteral(StringView Lit) {
+ // <Lit> is the printable type marker for the literal (e.g. "u", "ll").
+ // parseNumber(true): presumably permits a negative value — TODO confirm.
+ // The literal must be terminated by 'E'.
+ StringView Tmp = parseNumber(true);
+ if (!Tmp.empty() && consumeIf('E'))
+ return make<IntegerLiteral>(Lit, Tmp);
+ return nullptr;
+}
+
+// <CV-Qualifiers> ::= [r] [V] [K]
+template <typename Alloc, typename Derived>
+Qualifiers AbstractManglingParser<Alloc, Derived>::parseCVQualifiers() {
+ // The order in the mangling is fixed: r (restrict), V (volatile), K (const).
+ // Each is optional; absent qualifiers simply aren't consumed.
+ Qualifiers CVR = QualNone;
+ if (consumeIf('r'))
+ CVR |= QualRestrict;
+ if (consumeIf('V'))
+ CVR |= QualVolatile;
+ if (consumeIf('K'))
+ CVR |= QualConst;
+ return CVR;
+}
+
+// <function-param> ::= fp <top-level CV-Qualifiers> _ # L == 0, first parameter
+// ::= fp <top-level CV-Qualifiers> <parameter-2 non-negative number> _ # L == 0, second and later parameters
+// ::= fL <L-1 non-negative number> p <top-level CV-Qualifiers> _ # L > 0, first parameter
+// ::= fL <L-1 non-negative number> p <top-level CV-Qualifiers> <parameter-2 non-negative number> _ # L > 0, second and later parameters
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseFunctionParam() {
+ // The top-level CV qualifiers and the L-level number are parsed for
+ // validity but deliberately discarded; only the parameter index <Num>
+ // reaches the output node.
+ if (consumeIf("fp")) {
+ parseCVQualifiers();
+ StringView Num = parseNumber();
+ if (!consumeIf('_'))
+ return nullptr;
+ return make<FunctionParam>(Num);
+ }
+ if (consumeIf("fL")) {
+ if (parseNumber().empty())
+ return nullptr;
+ if (!consumeIf('p'))
+ return nullptr;
+ parseCVQualifiers();
+ StringView Num = parseNumber();
+ if (!consumeIf('_'))
+ return nullptr;
+ return make<FunctionParam>(Num);
+ }
+ return nullptr;
+}
+
+// [gs] nw <expression>* _ <type> E # new (expr-list) type
+// [gs] nw <expression>* _ <type> <initializer> # new (expr-list) type (init)
+// [gs] na <expression>* _ <type> E # new[] (expr-list) type
+// [gs] na <expression>* _ <type> <initializer> # new[] (expr-list) type (init)
+// <initializer> ::= pi <expression>* E # parenthesized initialization
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseNewExpr() {
+ bool Global = consumeIf("gs");
+ // Peek at the second character before consuming: "na" is array new,
+ // "nw" is scalar new.
+ bool IsArray = look(1) == 'a';
+ if (!consumeIf("nw") && !consumeIf("na"))
+ return nullptr;
+ // Placement arguments accumulate on the Names stack until '_'.
+ size_t Exprs = Names.size();
+ while (!consumeIf('_')) {
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return nullptr;
+ Names.push_back(Ex);
+ }
+ NodeArray ExprList = popTrailingNodeArray(Exprs);
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return Ty;
+ // Optional parenthesized initializer list: "pi <expression>* E".
+ if (consumeIf("pi")) {
+ size_t InitsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *Init = getDerived().parseExpr();
+ if (Init == nullptr)
+ return Init;
+ Names.push_back(Init);
+ }
+ NodeArray Inits = popTrailingNodeArray(InitsBegin);
+ return make<NewExpr>(ExprList, Ty, Inits, Global, IsArray);
+ } else if (!consumeIf('E'))
+ return nullptr;
+ return make<NewExpr>(ExprList, Ty, NodeArray(), Global, IsArray);
+}
+
+// cv <type> <expression> # conversion with one argument
+// cv <type> _ <expression>* E # conversion with a different number of arguments
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseConversionExpr() {
+ if (!consumeIf("cv"))
+ return nullptr;
+ Node *Ty;
+ {
+ // Temporarily clear TryToParseTemplateArgs while reading the destination
+ // type; it is restored when this scope ends.
+ SwapAndRestore<bool> SaveTemp(TryToParseTemplateArgs, false);
+ Ty = getDerived().parseType();
+ }
+
+ if (Ty == nullptr)
+ return nullptr;
+
+ // '_' introduces the variadic form: zero or more arguments up to 'E'.
+ if (consumeIf('_')) {
+ size_t ExprsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *E = getDerived().parseExpr();
+ if (E == nullptr)
+ return E;
+ Names.push_back(E);
+ }
+ NodeArray Exprs = popTrailingNodeArray(ExprsBegin);
+ return make<ConversionExpr>(Ty, Exprs);
+ }
+
+ // Single-argument form: exactly one expression, no terminator.
+ Node *E[1] = {getDerived().parseExpr()};
+ if (E[0] == nullptr)
+ return nullptr;
+ return make<ConversionExpr>(Ty, makeNodeArray(E, E + 1));
+}
+
+// <expr-primary> ::= L <type> <value number> E # integer literal
+// ::= L <type> <value float> E # floating literal
+// ::= L <string type> E # string literal
+// ::= L <nullptr type> E # nullptr literal (i.e., "LDnE")
+// FIXME: ::= L <type> <real-part float> _ <imag-part float> E # complex floating point literal (C 2000)
+// ::= L <mangled-name> E # external name
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseExprPrimary() {
+ if (!consumeIf('L'))
+ return nullptr;
+ // Dispatch on the <type> code following 'L'. For integral types the string
+ // passed to parseIntegerLiteral is the literal's printed type marker
+ // (e.g. "" for int, "u", "ul", "ll").
+ switch (look()) {
+ case 'w':
+ ++First;
+ return getDerived().parseIntegerLiteral("wchar_t");
+ case 'b':
+ if (consumeIf("b0E"))
+ return make<BoolExpr>(0);
+ if (consumeIf("b1E"))
+ return make<BoolExpr>(1);
+ return nullptr;
+ case 'c':
+ ++First;
+ return getDerived().parseIntegerLiteral("char");
+ case 'a':
+ ++First;
+ return getDerived().parseIntegerLiteral("signed char");
+ case 'h':
+ ++First;
+ return getDerived().parseIntegerLiteral("unsigned char");
+ case 's':
+ ++First;
+ return getDerived().parseIntegerLiteral("short");
+ case 't':
+ ++First;
+ return getDerived().parseIntegerLiteral("unsigned short");
+ case 'i':
+ ++First;
+ return getDerived().parseIntegerLiteral("");
+ case 'j':
+ ++First;
+ return getDerived().parseIntegerLiteral("u");
+ case 'l':
+ ++First;
+ return getDerived().parseIntegerLiteral("l");
+ case 'm':
+ ++First;
+ return getDerived().parseIntegerLiteral("ul");
+ case 'x':
+ ++First;
+ return getDerived().parseIntegerLiteral("ll");
+ case 'y':
+ ++First;
+ return getDerived().parseIntegerLiteral("ull");
+ case 'n':
+ ++First;
+ return getDerived().parseIntegerLiteral("__int128");
+ case 'o':
+ ++First;
+ return getDerived().parseIntegerLiteral("unsigned __int128");
+ case 'f':
+ ++First;
+ return getDerived().template parseFloatingLiteral<float>();
+ case 'd':
+ ++First;
+ return getDerived().template parseFloatingLiteral<double>();
+ case 'e':
+ ++First;
+ return getDerived().template parseFloatingLiteral<long double>();
+ case '_':
+ // ::= L <mangled-name> E # external name
+ if (consumeIf("_Z")) {
+ Node *R = getDerived().parseEncoding();
+ if (R != nullptr && consumeIf('E'))
+ return R;
+ }
+ return nullptr;
+ case 'T':
+ // Invalid mangled name per
+ // http://sourcerytools.com/pipermail/cxx-abi-dev/2011-August/002422.html
+ return nullptr;
+ default: {
+ // might be named type
+ Node *T = getDerived().parseType();
+ if (T == nullptr)
+ return nullptr;
+ StringView N = parseNumber();
+ if (!N.empty()) {
+ if (!consumeIf('E'))
+ return nullptr;
+ return make<IntegerCastExpr>(T, N);
+ }
+ if (consumeIf('E'))
+ return T;
+ return nullptr;
+ }
+ }
+}
+
+// <braced-expression> ::= <expression>
+// ::= di <field source-name> <braced-expression> # .name = expr
+// ::= dx <index expression> <braced-expression> # [expr] = expr
+// ::= dX <range begin expression> <range end expression> <braced-expression>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseBracedExpr() {
+ if (look() == 'd') {
+ // di/dx/dX are the designated-initializer forms; any other prefix falls
+ // through to a plain <expression>.
+ switch (look(1)) {
+ case 'i': {
+ First += 2;
+ Node *Field = getDerived().parseSourceName(/*NameState=*/nullptr);
+ if (Field == nullptr)
+ return nullptr;
+ Node *Init = getDerived().parseBracedExpr();
+ if (Init == nullptr)
+ return nullptr;
+ return make<BracedExpr>(Field, Init, /*isArray=*/false);
+ }
+ case 'x': {
+ First += 2;
+ Node *Index = getDerived().parseExpr();
+ if (Index == nullptr)
+ return nullptr;
+ Node *Init = getDerived().parseBracedExpr();
+ if (Init == nullptr)
+ return nullptr;
+ return make<BracedExpr>(Index, Init, /*isArray=*/true);
+ }
+ case 'X': {
+ First += 2;
+ Node *RangeBegin = getDerived().parseExpr();
+ if (RangeBegin == nullptr)
+ return nullptr;
+ Node *RangeEnd = getDerived().parseExpr();
+ if (RangeEnd == nullptr)
+ return nullptr;
+ Node *Init = getDerived().parseBracedExpr();
+ if (Init == nullptr)
+ return nullptr;
+ return make<BracedRangeExpr>(RangeBegin, RangeEnd, Init);
+ }
+ }
+ }
+ return getDerived().parseExpr();
+}
+
+// (not yet in the spec)
+// <fold-expr> ::= fL <binary-operator-name> <expression> <expression>
+// ::= fR <binary-operator-name> <expression> <expression>
+// ::= fl <binary-operator-name> <expression>
+// ::= fr <binary-operator-name> <expression>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseFoldExpr() {
+ if (!consumeIf('f'))
+ return nullptr;
+
+ // Second letter encodes direction and arity: lowercase = unary fold,
+ // uppercase = binary fold (has an initializer); l/L = left, r/R = right.
+ char FoldKind = look();
+ bool IsLeftFold, HasInitializer;
+ HasInitializer = FoldKind == 'L' || FoldKind == 'R';
+ if (FoldKind == 'l' || FoldKind == 'L')
+ IsLeftFold = true;
+ else if (FoldKind == 'r' || FoldKind == 'R')
+ IsLeftFold = false;
+ else
+ return nullptr;
+ ++First;
+
+ // FIXME: This map is duplicated in parseOperatorName and parseExpr.
+ StringView OperatorName;
+ if (consumeIf("aa")) OperatorName = "&&";
+ else if (consumeIf("an")) OperatorName = "&";
+ else if (consumeIf("aN")) OperatorName = "&=";
+ else if (consumeIf("aS")) OperatorName = "=";
+ else if (consumeIf("cm")) OperatorName = ",";
+ else if (consumeIf("ds")) OperatorName = ".*";
+ else if (consumeIf("dv")) OperatorName = "/";
+ else if (consumeIf("dV")) OperatorName = "/=";
+ else if (consumeIf("eo")) OperatorName = "^";
+ else if (consumeIf("eO")) OperatorName = "^=";
+ else if (consumeIf("eq")) OperatorName = "==";
+ else if (consumeIf("ge")) OperatorName = ">=";
+ else if (consumeIf("gt")) OperatorName = ">";
+ else if (consumeIf("le")) OperatorName = "<=";
+ else if (consumeIf("ls")) OperatorName = "<<";
+ else if (consumeIf("lS")) OperatorName = "<<=";
+ else if (consumeIf("lt")) OperatorName = "<";
+ else if (consumeIf("mi")) OperatorName = "-";
+ else if (consumeIf("mI")) OperatorName = "-=";
+ else if (consumeIf("ml")) OperatorName = "*";
+ else if (consumeIf("mL")) OperatorName = "*=";
+ else if (consumeIf("ne")) OperatorName = "!=";
+ else if (consumeIf("oo")) OperatorName = "||";
+ else if (consumeIf("or")) OperatorName = "|";
+ else if (consumeIf("oR")) OperatorName = "|=";
+ else if (consumeIf("pl")) OperatorName = "+";
+ else if (consumeIf("pL")) OperatorName = "+=";
+ else if (consumeIf("rm")) OperatorName = "%";
+ else if (consumeIf("rM")) OperatorName = "%=";
+ else if (consumeIf("rs")) OperatorName = ">>";
+ else if (consumeIf("rS")) OperatorName = ">>=";
+ else return nullptr;
+
+ Node *Pack = getDerived().parseExpr(), *Init = nullptr;
+ if (Pack == nullptr)
+ return nullptr;
+ if (HasInitializer) {
+ Init = getDerived().parseExpr();
+ if (Init == nullptr)
+ return nullptr;
+ }
+
+ // NOTE(review): for left folds the two operands are swapped before
+ // constructing FoldExpr — confirm the expected operand order against the
+ // FoldExpr printer.
+ if (IsLeftFold && Init)
+ std::swap(Pack, Init);
+
+ return make<FoldExpr>(IsLeftFold, OperatorName, Pack, Init);
+}
+
+// <expression> ::= <unary operator-name> <expression>
+// ::= <binary operator-name> <expression> <expression>
+// ::= <ternary operator-name> <expression> <expression> <expression>
+// ::= cl <expression>+ E # call
+// ::= cv <type> <expression> # conversion with one argument
+// ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+// ::= [gs] nw <expression>* _ <type> E # new (expr-list) type
+// ::= [gs] nw <expression>* _ <type> <initializer> # new (expr-list) type (init)
+// ::= [gs] na <expression>* _ <type> E # new[] (expr-list) type
+// ::= [gs] na <expression>* _ <type> <initializer> # new[] (expr-list) type (init)
+// ::= [gs] dl <expression> # delete expression
+// ::= [gs] da <expression> # delete[] expression
+// ::= pp_ <expression> # prefix ++
+// ::= mm_ <expression> # prefix --
+// ::= ti <type> # typeid (type)
+// ::= te <expression> # typeid (expression)
+// ::= dc <type> <expression> # dynamic_cast<type> (expression)
+// ::= sc <type> <expression> # static_cast<type> (expression)
+// ::= cc <type> <expression> # const_cast<type> (expression)
+// ::= rc <type> <expression> # reinterpret_cast<type> (expression)
+// ::= st <type> # sizeof (a type)
+// ::= sz <expression> # sizeof (an expression)
+// ::= at <type> # alignof (a type)
+// ::= az <expression> # alignof (an expression)
+// ::= nx <expression> # noexcept (expression)
+// ::= <template-param>
+// ::= <function-param>
+// ::= dt <expression> <unresolved-name> # expr.name
+// ::= pt <expression> <unresolved-name> # expr->name
+// ::= ds <expression> <expression> # expr.*expr
+// ::= sZ <template-param> # size of a parameter pack
+// ::= sZ <function-param> # size of a function parameter pack
+// ::= sP <template-arg>* E # sizeof...(T), size of a captured template parameter pack from an alias template
+// ::= sp <expression> # pack expansion
+// ::= tw <expression> # throw expression
+// ::= tr # throw with no operand (rethrow)
+// ::= <unresolved-name> # f(p), N::f(p), ::f(p),
+// # freestanding dependent name (e.g., T::x),
+// # objectless nonstatic member reference
+// ::= fL <binary-operator-name> <expression> <expression>
+// ::= fR <binary-operator-name> <expression> <expression>
+// ::= fl <binary-operator-name> <expression>
+// ::= fr <binary-operator-name> <expression>
+// ::= <expr-primary>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseExpr() {
+ bool Global = consumeIf("gs");
+ // Every production handled below is at least two characters long, so a
+ // truncated input can be rejected up front.
+ if (numLeft() < 2)
+ return nullptr;
+
+ switch (*First) {
+ case 'L':
+ return getDerived().parseExprPrimary();
+ case 'T':
+ return getDerived().parseTemplateParam();
+ case 'f': {
+ // Disambiguate a fold expression from a <function-param>.
+ if (look(1) == 'p' || (look(1) == 'L' && std::isdigit(look(2))))
+ return getDerived().parseFunctionParam();
+ return getDerived().parseFoldExpr();
+ }
+ case 'a':
+ switch (First[1]) {
+ case 'a':
+ First += 2;
+ return getDerived().parseBinaryExpr("&&");
+ case 'd':
+ First += 2;
+ return getDerived().parsePrefixExpr("&");
+ case 'n':
+ First += 2;
+ return getDerived().parseBinaryExpr("&");
+ case 'N':
+ First += 2;
+ return getDerived().parseBinaryExpr("&=");
+ case 'S':
+ First += 2;
+ return getDerived().parseBinaryExpr("=");
+ case 't': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<EnclosingExpr>("alignof (", Ty, ")");
+ }
+ case 'z': {
+ First += 2;
+ Node *Ty = getDerived().parseExpr();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<EnclosingExpr>("alignof (", Ty, ")");
+ }
+ }
+ return nullptr;
+ case 'c':
+ switch (First[1]) {
+ // cc <type> <expression> # const_cast<type>(expression)
+ case 'c': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return Ty;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<CastExpr>("const_cast", Ty, Ex);
+ }
+ // cl <expression>+ E # call
+ case 'l': {
+ First += 2;
+ Node *Callee = getDerived().parseExpr();
+ if (Callee == nullptr)
+ return Callee;
+ size_t ExprsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *E = getDerived().parseExpr();
+ if (E == nullptr)
+ return E;
+ Names.push_back(E);
+ }
+ return make<CallExpr>(Callee, popTrailingNodeArray(ExprsBegin));
+ }
+ case 'm':
+ First += 2;
+ return getDerived().parseBinaryExpr(",");
+ case 'o':
+ First += 2;
+ return getDerived().parsePrefixExpr("~");
+ case 'v':
+ return getDerived().parseConversionExpr();
+ }
+ return nullptr;
+ case 'd':
+ switch (First[1]) {
+ case 'a': {
+ First += 2;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<DeleteExpr>(Ex, Global, /*is_array=*/true);
+ }
+ case 'c': {
+ First += 2;
+ Node *T = getDerived().parseType();
+ if (T == nullptr)
+ return T;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<CastExpr>("dynamic_cast", T, Ex);
+ }
+ case 'e':
+ First += 2;
+ return getDerived().parsePrefixExpr("*");
+ case 'l': {
+ First += 2;
+ Node *E = getDerived().parseExpr();
+ if (E == nullptr)
+ return E;
+ return make<DeleteExpr>(E, Global, /*is_array=*/false);
+ }
+ case 'n':
+ return getDerived().parseUnresolvedName();
+ case 's': {
+ First += 2;
+ Node *LHS = getDerived().parseExpr();
+ if (LHS == nullptr)
+ return nullptr;
+ Node *RHS = getDerived().parseExpr();
+ if (RHS == nullptr)
+ return nullptr;
+ return make<MemberExpr>(LHS, ".*", RHS);
+ }
+ case 't': {
+ First += 2;
+ Node *LHS = getDerived().parseExpr();
+ if (LHS == nullptr)
+ return LHS;
+ Node *RHS = getDerived().parseExpr();
+ if (RHS == nullptr)
+ return nullptr;
+ return make<MemberExpr>(LHS, ".", RHS);
+ }
+ case 'v':
+ First += 2;
+ return getDerived().parseBinaryExpr("/");
+ case 'V':
+ First += 2;
+ return getDerived().parseBinaryExpr("/=");
+ }
+ return nullptr;
+ case 'e':
+ switch (First[1]) {
+ case 'o':
+ First += 2;
+ return getDerived().parseBinaryExpr("^");
+ case 'O':
+ First += 2;
+ return getDerived().parseBinaryExpr("^=");
+ case 'q':
+ First += 2;
+ return getDerived().parseBinaryExpr("==");
+ }
+ return nullptr;
+ case 'g':
+ switch (First[1]) {
+ case 'e':
+ First += 2;
+ return getDerived().parseBinaryExpr(">=");
+ case 't':
+ First += 2;
+ return getDerived().parseBinaryExpr(">");
+ }
+ return nullptr;
+ case 'i':
+ switch (First[1]) {
+ case 'x': {
+ First += 2;
+ Node *Base = getDerived().parseExpr();
+ if (Base == nullptr)
+ return nullptr;
+ Node *Index = getDerived().parseExpr();
+ if (Index == nullptr)
+ return Index;
+ return make<ArraySubscriptExpr>(Base, Index);
+ }
+ case 'l': {
+ First += 2;
+ size_t InitsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *E = getDerived().parseBracedExpr();
+ if (E == nullptr)
+ return nullptr;
+ Names.push_back(E);
+ }
+ return make<InitListExpr>(nullptr, popTrailingNodeArray(InitsBegin));
+ }
+ }
+ return nullptr;
+ case 'l':
+ switch (First[1]) {
+ case 'e':
+ First += 2;
+ return getDerived().parseBinaryExpr("<=");
+ case 's':
+ First += 2;
+ return getDerived().parseBinaryExpr("<<");
+ case 'S':
+ First += 2;
+ return getDerived().parseBinaryExpr("<<=");
+ case 't':
+ First += 2;
+ return getDerived().parseBinaryExpr("<");
+ }
+ return nullptr;
+ case 'm':
+ switch (First[1]) {
+ case 'i':
+ First += 2;
+ return getDerived().parseBinaryExpr("-");
+ case 'I':
+ First += 2;
+ return getDerived().parseBinaryExpr("-=");
+ case 'l':
+ First += 2;
+ return getDerived().parseBinaryExpr("*");
+ case 'L':
+ First += 2;
+ return getDerived().parseBinaryExpr("*=");
+ case 'm':
+ First += 2;
+ if (consumeIf('_'))
+ return getDerived().parsePrefixExpr("--");
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return nullptr;
+ return make<PostfixExpr>(Ex, "--");
+ }
+ return nullptr;
+ case 'n':
+ switch (First[1]) {
+ case 'a':
+ case 'w':
+ return getDerived().parseNewExpr();
+ case 'e':
+ First += 2;
+ return getDerived().parseBinaryExpr("!=");
+ case 'g':
+ First += 2;
+ return getDerived().parsePrefixExpr("-");
+ case 't':
+ First += 2;
+ return getDerived().parsePrefixExpr("!");
+ case 'x':
+ First += 2;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<EnclosingExpr>("noexcept (", Ex, ")");
+ }
+ return nullptr;
+ case 'o':
+ switch (First[1]) {
+ case 'n':
+ return getDerived().parseUnresolvedName();
+ case 'o':
+ First += 2;
+ return getDerived().parseBinaryExpr("||");
+ case 'r':
+ First += 2;
+ return getDerived().parseBinaryExpr("|");
+ case 'R':
+ First += 2;
+ return getDerived().parseBinaryExpr("|=");
+ }
+ return nullptr;
+ case 'p':
+ switch (First[1]) {
+ case 'm':
+ First += 2;
+ return getDerived().parseBinaryExpr("->*");
+ case 'l':
+ First += 2;
+ return getDerived().parseBinaryExpr("+");
+ case 'L':
+ First += 2;
+ return getDerived().parseBinaryExpr("+=");
+ case 'p': {
+ First += 2;
+ if (consumeIf('_'))
+ return getDerived().parsePrefixExpr("++");
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<PostfixExpr>(Ex, "++");
+ }
+ case 's':
+ First += 2;
+ return getDerived().parsePrefixExpr("+");
+ case 't': {
+ First += 2;
+ Node *L = getDerived().parseExpr();
+ if (L == nullptr)
+ return nullptr;
+ Node *R = getDerived().parseExpr();
+ if (R == nullptr)
+ return nullptr;
+ return make<MemberExpr>(L, "->", R);
+ }
+ }
+ return nullptr;
+ case 'q':
+ if (First[1] == 'u') {
+ First += 2;
+ Node *Cond = getDerived().parseExpr();
+ if (Cond == nullptr)
+ return nullptr;
+ Node *LHS = getDerived().parseExpr();
+ if (LHS == nullptr)
+ return nullptr;
+ Node *RHS = getDerived().parseExpr();
+ if (RHS == nullptr)
+ return nullptr;
+ return make<ConditionalExpr>(Cond, LHS, RHS);
+ }
+ return nullptr;
+ case 'r':
+ switch (First[1]) {
+ case 'c': {
+ First += 2;
+ Node *T = getDerived().parseType();
+ if (T == nullptr)
+ return T;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<CastExpr>("reinterpret_cast", T, Ex);
+ }
+ case 'm':
+ First += 2;
+ return getDerived().parseBinaryExpr("%");
+ case 'M':
+ First += 2;
+ return getDerived().parseBinaryExpr("%=");
+ case 's':
+ First += 2;
+ return getDerived().parseBinaryExpr(">>");
+ case 'S':
+ First += 2;
+ return getDerived().parseBinaryExpr(">>=");
+ }
+ return nullptr;
+ case 's':
+ switch (First[1]) {
+ case 'c': {
+ First += 2;
+ Node *T = getDerived().parseType();
+ if (T == nullptr)
+ return T;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<CastExpr>("static_cast", T, Ex);
+ }
+ case 'p': {
+ First += 2;
+ Node *Child = getDerived().parseExpr();
+ if (Child == nullptr)
+ return nullptr;
+ return make<ParameterPackExpansion>(Child);
+ }
+ case 'r':
+ return getDerived().parseUnresolvedName();
+ case 't': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return Ty;
+ return make<EnclosingExpr>("sizeof (", Ty, ")");
+ }
+ case 'z': {
+ First += 2;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<EnclosingExpr>("sizeof (", Ex, ")");
+ }
+ case 'Z':
+ First += 2;
+ if (look() == 'T') {
+ Node *R = getDerived().parseTemplateParam();
+ if (R == nullptr)
+ return nullptr;
+ return make<SizeofParamPackExpr>(R);
+ } else if (look() == 'f') {
+ Node *FP = getDerived().parseFunctionParam();
+ if (FP == nullptr)
+ return nullptr;
+ return make<EnclosingExpr>("sizeof... (", FP, ")");
+ }
+ return nullptr;
+ case 'P': {
+ First += 2;
+ size_t ArgsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *Arg = getDerived().parseTemplateArg();
+ if (Arg == nullptr)
+ return nullptr;
+ Names.push_back(Arg);
+ }
+ auto *Pack = make<NodeArrayNode>(popTrailingNodeArray(ArgsBegin));
+ if (!Pack)
+ return nullptr;
+ return make<EnclosingExpr>("sizeof... (", Pack, ")");
+ }
+ }
+ return nullptr;
+ case 't':
+ switch (First[1]) {
+ case 'e': {
+ First += 2;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return Ex;
+ return make<EnclosingExpr>("typeid (", Ex, ")");
+ }
+ case 'i': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return Ty;
+ return make<EnclosingExpr>("typeid (", Ty, ")");
+ }
+ case 'l': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ size_t InitsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *E = getDerived().parseBracedExpr();
+ if (E == nullptr)
+ return nullptr;
+ Names.push_back(E);
+ }
+ return make<InitListExpr>(Ty, popTrailingNodeArray(InitsBegin));
+ }
+ case 'r':
+ First += 2;
+ return make<NameType>("throw");
+ case 'w': {
+ First += 2;
+ Node *Ex = getDerived().parseExpr();
+ if (Ex == nullptr)
+ return nullptr;
+ return make<ThrowExpr>(Ex);
+ }
+ }
+ return nullptr;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return getDerived().parseUnresolvedName();
+ }
+ return nullptr;
+}
+
+// <call-offset> ::= h <nv-offset> _
+// ::= v <v-offset> _
+//
+// <nv-offset> ::= <offset number>
+// # non-virtual base override
+//
+// <v-offset> ::= <offset number> _ <virtual offset number>
+// # virtual base override, with vcall offset
+template <typename Alloc, typename Derived>
+bool AbstractManglingParser<Alloc, Derived>::parseCallOffset() {
+ // Just scan through the call offset, we never add this information into the
+ // output.
+ // Returns true on parse FAILURE — note the inverted sense compared to the
+ // Node*-returning parsers in this file.
+ if (consumeIf('h'))
+ return parseNumber(true).empty() || !consumeIf('_');
+ if (consumeIf('v'))
+ return parseNumber(true).empty() || !consumeIf('_') ||
+ parseNumber(true).empty() || !consumeIf('_');
+ return true;
+}
+
+// <special-name> ::= TV <type> # virtual table
+// ::= TT <type> # VTT structure (construction vtable index)
+// ::= TI <type> # typeinfo structure
+// ::= TS <type> # typeinfo name (null-terminated byte string)
+// ::= Tc <call-offset> <call-offset> <base encoding>
+// # base is the nominal target function of thunk
+// # first call-offset is 'this' adjustment
+// # second call-offset is result adjustment
+// ::= T <call-offset> <base encoding>
+// # base is the nominal target function of thunk
+// ::= GV <object name> # Guard variable for one-time initialization
+// # No <type>
+// ::= TW <object name> # Thread-local wrapper
+// ::= TH <object name> # Thread-local initialization
+// ::= GR <object name> _ # First temporary
+// ::= GR <object name> <seq-id> _ # Subsequent temporaries
+// extension ::= TC <first type> <number> _ <second type> # construction vtable for second-in-first
+// extension ::= GR <object name> # reference temporary for object
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseSpecialName() {
+ switch (look()) {
+ case 'T':
+ switch (look(1)) {
+ // TV <type> # virtual table
+ case 'V': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<SpecialName>("vtable for ", Ty);
+ }
+ // TT <type> # VTT structure (construction vtable index)
+ case 'T': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<SpecialName>("VTT for ", Ty);
+ }
+ // TI <type> # typeinfo structure
+ case 'I': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<SpecialName>("typeinfo for ", Ty);
+ }
+ // TS <type> # typeinfo name (null-terminated byte string)
+ case 'S': {
+ First += 2;
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ return make<SpecialName>("typeinfo name for ", Ty);
+ }
+ // Tc <call-offset> <call-offset> <base encoding>
+ case 'c': {
+ First += 2;
+ if (parseCallOffset() || parseCallOffset())
+ return nullptr;
+ Node *Encoding = getDerived().parseEncoding();
+ if (Encoding == nullptr)
+ return nullptr;
+ return make<SpecialName>("covariant return thunk to ", Encoding);
+ }
+ // extension ::= TC <first type> <number> _ <second type>
+ // # construction vtable for second-in-first
+ case 'C': {
+ First += 2;
+ Node *FirstType = getDerived().parseType();
+ if (FirstType == nullptr)
+ return nullptr;
+ if (parseNumber(true).empty() || !consumeIf('_'))
+ return nullptr;
+ Node *SecondType = getDerived().parseType();
+ if (SecondType == nullptr)
+ return nullptr;
+ return make<CtorVtableSpecialName>(SecondType, FirstType);
+ }
+ // TW <object name> # Thread-local wrapper
+ case 'W': {
+ First += 2;
+ Node *Name = getDerived().parseName();
+ if (Name == nullptr)
+ return nullptr;
+ return make<SpecialName>("thread-local wrapper routine for ", Name);
+ }
+ // TH <object name> # Thread-local initialization
+ case 'H': {
+ First += 2;
+ Node *Name = getDerived().parseName();
+ if (Name == nullptr)
+ return nullptr;
+ return make<SpecialName>("thread-local initialization routine for ", Name);
+ }
+ // T <call-offset> <base encoding>
+ default: {
+ ++First;
+ bool IsVirt = look() == 'v';
+ if (parseCallOffset())
+ return nullptr;
+ Node *BaseEncoding = getDerived().parseEncoding();
+ if (BaseEncoding == nullptr)
+ return nullptr;
+ if (IsVirt)
+ return make<SpecialName>("virtual thunk to ", BaseEncoding);
+ else
+ return make<SpecialName>("non-virtual thunk to ", BaseEncoding);
+ }
+ }
+ case 'G':
+ switch (look(1)) {
+ // GV <object name> # Guard variable for one-time initialization
+ case 'V': {
+ First += 2;
+ Node *Name = getDerived().parseName();
+ if (Name == nullptr)
+ return nullptr;
+ return make<SpecialName>("guard variable for ", Name);
+ }
+ // GR <object name> # reference temporary for object
+ // GR <object name> _ # First temporary
+ // GR <object name> <seq-id> _ # Subsequent temporaries
+ case 'R': {
+ First += 2;
+ Node *Name = getDerived().parseName();
+ if (Name == nullptr)
+ return nullptr;
+ size_t Count;
+ bool ParsedSeqId = !parseSeqId(&Count);
+ if (!consumeIf('_') && ParsedSeqId)
+ return nullptr;
+ return make<SpecialName>("reference temporary for ", Name);
+ }
+ }
+ }
+ return nullptr;
+}
+
+// <encoding> ::= <function name> <bare-function-type>
+// ::= <data name>
+// ::= <special-name>
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseEncoding() {
+ if (look() == 'G' || look() == 'T')
+ return getDerived().parseSpecialName();
+
+ auto IsEndOfEncoding = [&] {
+ // The set of chars that can potentially follow an <encoding> (none of which
+ // can start a <type>). Enumerating these allows us to avoid speculative
+ // parsing.
+ return numLeft() == 0 || look() == 'E' || look() == '.' || look() == '_';
+ };
+
+ NameState NameInfo(this);
+ Node *Name = getDerived().parseName(&NameInfo);
+ if (Name == nullptr)
+ return nullptr;
+
+ if (resolveForwardTemplateRefs(NameInfo))
+ return nullptr;
+
+ if (IsEndOfEncoding())
+ return Name;
+
+ Node *Attrs = nullptr;
+ if (consumeIf("Ua9enable_ifI")) {
+ size_t BeforeArgs = Names.size();
+ while (!consumeIf('E')) {
+ Node *Arg = getDerived().parseTemplateArg();
+ if (Arg == nullptr)
+ return nullptr;
+ Names.push_back(Arg);
+ }
+ Attrs = make<EnableIfAttr>(popTrailingNodeArray(BeforeArgs));
+ if (!Attrs)
+ return nullptr;
+ }
+
+ Node *ReturnType = nullptr;
+ if (!NameInfo.CtorDtorConversion && NameInfo.EndsWithTemplateArgs) {
+ ReturnType = getDerived().parseType();
+ if (ReturnType == nullptr)
+ return nullptr;
+ }
+
+ if (consumeIf('v'))
+ return make<FunctionEncoding>(ReturnType, Name, NodeArray(),
+ Attrs, NameInfo.CVQualifiers,
+ NameInfo.ReferenceQualifier);
+
+ size_t ParamsBegin = Names.size();
+ do {
+ Node *Ty = getDerived().parseType();
+ if (Ty == nullptr)
+ return nullptr;
+ Names.push_back(Ty);
+ } while (!IsEndOfEncoding());
+
+ return make<FunctionEncoding>(ReturnType, Name,
+ popTrailingNodeArray(ParamsBegin),
+ Attrs, NameInfo.CVQualifiers,
+ NameInfo.ReferenceQualifier);
+}
+
+template <class Float>
+struct FloatData;
+
+template <>
+struct FloatData<float>
+{
+ static const size_t mangled_size = 8;
+ static const size_t max_demangled_size = 24;
+ static constexpr const char* spec = "%af";
+};
+
+template <>
+struct FloatData<double>
+{
+ static const size_t mangled_size = 16;
+ static const size_t max_demangled_size = 32;
+ static constexpr const char* spec = "%a";
+};
+
+template <>
+struct FloatData<long double>
+{
+#if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \
+ defined(__wasm__)
+ static const size_t mangled_size = 32;
+#elif defined(__arm__) || defined(__mips__) || defined(__hexagon__)
+ static const size_t mangled_size = 16;
+#else
+ static const size_t mangled_size = 20; // May need to be adjusted to 16 or 24 on other platforms
+#endif
+ static const size_t max_demangled_size = 40;
+ static constexpr const char *spec = "%LaL";
+};
+
+template <typename Alloc, typename Derived>
+template <class Float>
+Node *AbstractManglingParser<Alloc, Derived>::parseFloatingLiteral() {
+ const size_t N = FloatData<Float>::mangled_size;
+ if (numLeft() <= N)
+ return nullptr;
+ StringView Data(First, First + N);
+ for (char C : Data)
+ if (!std::isxdigit(C))
+ return nullptr;
+ First += N;
+ if (!consumeIf('E'))
+ return nullptr;
+ return make<FloatLiteralImpl<Float>>(Data);
+}
+
+// <seq-id> ::= <0-9A-Z>+
+template <typename Alloc, typename Derived>
+bool AbstractManglingParser<Alloc, Derived>::parseSeqId(size_t *Out) {
+ if (!(look() >= '0' && look() <= '9') &&
+ !(look() >= 'A' && look() <= 'Z'))
+ return true;
+
+ size_t Id = 0;
+ while (true) {
+ if (look() >= '0' && look() <= '9') {
+ Id *= 36;
+ Id += static_cast<size_t>(look() - '0');
+ } else if (look() >= 'A' && look() <= 'Z') {
+ Id *= 36;
+ Id += static_cast<size_t>(look() - 'A') + 10;
+ } else {
+ *Out = Id;
+ return false;
+ }
+ ++First;
+ }
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+// <substitution> ::= Sa # ::std::allocator
+// <substitution> ::= Sb # ::std::basic_string
+// <substitution> ::= Ss # ::std::basic_string < char,
+// ::std::char_traits<char>,
+// ::std::allocator<char> >
+// <substitution> ::= Si # ::std::basic_istream<char, std::char_traits<char> >
+// <substitution> ::= So # ::std::basic_ostream<char, std::char_traits<char> >
+// <substitution> ::= Sd # ::std::basic_iostream<char, std::char_traits<char> >
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseSubstitution() {
+ if (!consumeIf('S'))
+ return nullptr;
+
+ if (std::islower(look())) {
+ Node *SpecialSub;
+ switch (look()) {
+ case 'a':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::allocator);
+ break;
+ case 'b':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::basic_string);
+ break;
+ case 's':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::string);
+ break;
+ case 'i':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::istream);
+ break;
+ case 'o':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::ostream);
+ break;
+ case 'd':
+ ++First;
+ SpecialSub = make<SpecialSubstitution>(SpecialSubKind::iostream);
+ break;
+ default:
+ return nullptr;
+ }
+ if (!SpecialSub)
+ return nullptr;
+ // Itanium C++ ABI 5.1.2: If a name that would use a built-in <substitution>
+ // has ABI tags, the tags are appended to the substitution; the result is a
+ // substitutable component.
+ Node *WithTags = getDerived().parseAbiTags(SpecialSub);
+ if (WithTags != SpecialSub) {
+ Subs.push_back(WithTags);
+ SpecialSub = WithTags;
+ }
+ return SpecialSub;
+ }
+
+ // ::= S_
+ if (consumeIf('_')) {
+ if (Subs.empty())
+ return nullptr;
+ return Subs[0];
+ }
+
+ // ::= S <seq-id> _
+ size_t Index = 0;
+ if (parseSeqId(&Index))
+ return nullptr;
+ ++Index;
+ if (!consumeIf('_') || Index >= Subs.size())
+ return nullptr;
+ return Subs[Index];
+}
+
+// <template-param> ::= T_ # first template parameter
+// ::= T <parameter-2 non-negative number> _
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseTemplateParam() {
+ if (!consumeIf('T'))
+ return nullptr;
+
+ size_t Index = 0;
+ if (!consumeIf('_')) {
+ if (parsePositiveInteger(&Index))
+ return nullptr;
+ ++Index;
+ if (!consumeIf('_'))
+ return nullptr;
+ }
+
+ // Itanium ABI 5.1.8: In a generic lambda, uses of auto in the parameter list
+ // are mangled as the corresponding artificial template type parameter.
+ if (ParsingLambdaParams)
+ return make<NameType>("auto");
+
+ // If we're in a context where this <template-param> refers to a
+ // <template-arg> further ahead in the mangled name (currently just conversion
+ // operator types), then we should only look it up in the right context.
+ if (PermitForwardTemplateReferences) {
+ Node *ForwardRef = make<ForwardTemplateReference>(Index);
+ if (!ForwardRef)
+ return nullptr;
+ assert(ForwardRef->getKind() == Node::KForwardTemplateReference);
+ ForwardTemplateRefs.push_back(
+ static_cast<ForwardTemplateReference *>(ForwardRef));
+ return ForwardRef;
+ }
+
+ if (Index >= TemplateParams.size())
+ return nullptr;
+ return TemplateParams[Index];
+}
+
+// <template-arg> ::= <type> # type or template
+// ::= X <expression> E # expression
+// ::= <expr-primary> # simple expressions
+// ::= J <template-arg>* E # argument pack
+// ::= LZ <encoding> E # extension
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parseTemplateArg() {
+ switch (look()) {
+ case 'X': {
+ ++First;
+ Node *Arg = getDerived().parseExpr();
+ if (Arg == nullptr || !consumeIf('E'))
+ return nullptr;
+ return Arg;
+ }
+ case 'J': {
+ ++First;
+ size_t ArgsBegin = Names.size();
+ while (!consumeIf('E')) {
+ Node *Arg = getDerived().parseTemplateArg();
+ if (Arg == nullptr)
+ return nullptr;
+ Names.push_back(Arg);
+ }
+ NodeArray Args = popTrailingNodeArray(ArgsBegin);
+ return make<TemplateArgumentPack>(Args);
+ }
+ case 'L': {
+ // ::= LZ <encoding> E # extension
+ if (look(1) == 'Z') {
+ First += 2;
+ Node *Arg = getDerived().parseEncoding();
+ if (Arg == nullptr || !consumeIf('E'))
+ return nullptr;
+ return Arg;
+ }
+ // ::= <expr-primary> # simple expressions
+ return getDerived().parseExprPrimary();
+ }
+ default:
+ return getDerived().parseType();
+ }
+}
+
+// <template-args> ::= I <template-arg>* E
+// extension, the abi says <template-arg>+
+template <typename Derived, typename Alloc>
+Node *
+AbstractManglingParser<Derived, Alloc>::parseTemplateArgs(bool TagTemplates) {
+ if (!consumeIf('I'))
+ return nullptr;
+
+ // <template-params> refer to the innermost <template-args>. Clear out any
+ // outer args that we may have inserted into TemplateParams.
+ if (TagTemplates)
+ TemplateParams.clear();
+
+ size_t ArgsBegin = Names.size();
+ while (!consumeIf('E')) {
+ if (TagTemplates) {
+ auto OldParams = std::move(TemplateParams);
+ Node *Arg = getDerived().parseTemplateArg();
+ TemplateParams = std::move(OldParams);
+ if (Arg == nullptr)
+ return nullptr;
+ Names.push_back(Arg);
+ Node *TableEntry = Arg;
+ if (Arg->getKind() == Node::KTemplateArgumentPack) {
+ TableEntry = make<ParameterPack>(
+ static_cast<TemplateArgumentPack*>(TableEntry)->getElements());
+ if (!TableEntry)
+ return nullptr;
+ }
+ TemplateParams.push_back(TableEntry);
+ } else {
+ Node *Arg = getDerived().parseTemplateArg();
+ if (Arg == nullptr)
+ return nullptr;
+ Names.push_back(Arg);
+ }
+ }
+ return make<TemplateArgs>(popTrailingNodeArray(ArgsBegin));
+}
+
+// <mangled-name> ::= _Z <encoding>
+// ::= <type>
+// extension ::= ___Z <encoding> _block_invoke
+// extension ::= ___Z <encoding> _block_invoke<decimal-digit>+
+// extension ::= ___Z <encoding> _block_invoke_<decimal-digit>+
+template <typename Derived, typename Alloc>
+Node *AbstractManglingParser<Derived, Alloc>::parse() {
+ if (consumeIf("_Z")) {
+ Node *Encoding = getDerived().parseEncoding();
+ if (Encoding == nullptr)
+ return nullptr;
+ if (look() == '.') {
+ Encoding = make<DotSuffix>(Encoding, StringView(First, Last));
+ First = Last;
+ }
+ if (numLeft() != 0)
+ return nullptr;
+ return Encoding;
+ }
+
+ if (consumeIf("___Z")) {
+ Node *Encoding = getDerived().parseEncoding();
+ if (Encoding == nullptr || !consumeIf("_block_invoke"))
+ return nullptr;
+ bool RequireNumber = consumeIf('_');
+ if (parseNumber().empty() && RequireNumber)
+ return nullptr;
+ if (look() == '.')
+ First = Last;
+ if (numLeft() != 0)
+ return nullptr;
+ return make<SpecialName>("invocation function for block in ", Encoding);
+ }
+
+ Node *Ty = getDerived().parseType();
+ if (numLeft() != 0)
+ return nullptr;
+ return Ty;
+}
+
+template <typename Alloc>
+struct ManglingParser : AbstractManglingParser<ManglingParser<Alloc>, Alloc> {
+ using AbstractManglingParser<ManglingParser<Alloc>,
+ Alloc>::AbstractManglingParser;
+};
+
+} // namespace itanium_demangle
+} // namespace llvm
+
+#endif // LLVM_DEMANGLE_ITANIUMDEMANGLE_H
diff --git a/contrib/llvm/include/llvm/Demangle/MicrosoftDemangle.h b/contrib/llvm/include/llvm/Demangle/MicrosoftDemangle.h
new file mode 100644
index 000000000000..97b918fc9459
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/MicrosoftDemangle.h
@@ -0,0 +1,276 @@
+//===------------------------- MicrosoftDemangle.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_MICROSOFT_DEMANGLE_H
+#define LLVM_DEMANGLE_MICROSOFT_DEMANGLE_H
+
+#include "llvm/Demangle/Compiler.h"
+#include "llvm/Demangle/MicrosoftDemangleNodes.h"
+#include "llvm/Demangle/StringView.h"
+#include "llvm/Demangle/Utility.h"
+
+#include <utility>
+
+namespace llvm {
+namespace ms_demangle {
+// This memory allocator is extremely fast, but it doesn't call dtors
+// for allocated objects. That means you can't use STL containers
+// (such as std::vector) with this allocator. But it pays off --
+// the demangler is 3x faster with this allocator compared to one with
+// STL containers.
+constexpr size_t AllocUnit = 4096;
+
+class ArenaAllocator {
+ struct AllocatorNode {
+ uint8_t *Buf = nullptr;
+ size_t Used = 0;
+ size_t Capacity = 0;
+ AllocatorNode *Next = nullptr;
+ };
+
+ void addNode(size_t Capacity) {
+ AllocatorNode *NewHead = new AllocatorNode;
+ NewHead->Buf = new uint8_t[Capacity];
+ NewHead->Next = Head;
+ NewHead->Capacity = Capacity;
+ Head = NewHead;
+ NewHead->Used = 0;
+ }
+
+public:
+ ArenaAllocator() { addNode(AllocUnit); }
+
+ ~ArenaAllocator() {
+ while (Head) {
+ assert(Head->Buf);
+ delete[] Head->Buf;
+ AllocatorNode *Next = Head->Next;
+ delete Head;
+ Head = Next;
+ }
+ }
+
+ char *allocUnalignedBuffer(size_t Length) {
+ uint8_t *Buf = Head->Buf + Head->Used;
+
+ Head->Used += Length;
+ if (Head->Used > Head->Capacity) {
+ // It's possible we need a buffer which is larger than our default unit
+ // size, so we need to be careful to add a node with capacity that is at
+ // least as large as what we need.
+ addNode(std::max(AllocUnit, Length));
+ Head->Used = Length;
+ Buf = Head->Buf;
+ }
+
+ return reinterpret_cast<char *>(Buf);
+ }
+
+ template <typename T, typename... Args> T *allocArray(size_t Count) {
+
+ size_t Size = Count * sizeof(T);
+ assert(Head && Head->Buf);
+
+ size_t P = (size_t)Head->Buf + Head->Used;
+ uintptr_t AlignedP =
+ (((size_t)P + alignof(T) - 1) & ~(size_t)(alignof(T) - 1));
+ uint8_t *PP = (uint8_t *)AlignedP;
+ size_t Adjustment = AlignedP - P;
+
+ Head->Used += Size + Adjustment;
+ if (Head->Used < Head->Capacity)
+ return new (PP) T[Count]();
+
+ addNode(AllocUnit);
+ Head->Used = Size;
+ return new (Head->Buf) T[Count]();
+ }
+
+ template <typename T, typename... Args> T *alloc(Args &&... ConstructorArgs) {
+
+ size_t Size = sizeof(T);
+ assert(Head && Head->Buf);
+
+ size_t P = (size_t)Head->Buf + Head->Used;
+ uintptr_t AlignedP =
+ (((size_t)P + alignof(T) - 1) & ~(size_t)(alignof(T) - 1));
+ uint8_t *PP = (uint8_t *)AlignedP;
+ size_t Adjustment = AlignedP - P;
+
+ Head->Used += Size + Adjustment;
+ if (Head->Used < Head->Capacity)
+ return new (PP) T(std::forward<Args>(ConstructorArgs)...);
+
+ addNode(AllocUnit);
+ Head->Used = Size;
+ return new (Head->Buf) T(std::forward<Args>(ConstructorArgs)...);
+ }
+
+private:
+ AllocatorNode *Head = nullptr;
+};
+
+struct BackrefContext {
+ static constexpr size_t Max = 10;
+
+ TypeNode *FunctionParams[Max];
+ size_t FunctionParamCount = 0;
+
+ // The first 10 BackReferences in a mangled name can be back-referenced by
+ // special name @[0-9]. This is a storage for the first 10 BackReferences.
+ NamedIdentifierNode *Names[Max];
+ size_t NamesCount = 0;
+};
+
+enum class QualifierMangleMode { Drop, Mangle, Result };
+
+enum NameBackrefBehavior : uint8_t {
+ NBB_None = 0, // don't save any names as backrefs.
+  NBB_Template = 1 << 0, // save template instantiations.
+ NBB_Simple = 1 << 1, // save simple names.
+};
+
+enum class FunctionIdentifierCodeGroup { Basic, Under, DoubleUnder };
+
+// Demangler class takes the main role in demangling symbols.
+// It has a set of functions to parse mangled symbols into Type instances.
+// It also has a set of functions to convert Type instances to strings.
+class Demangler {
+public:
+ Demangler() = default;
+ virtual ~Demangler() = default;
+
+ // You are supposed to call parse() first and then check if error is true. If
+ // it is false, call output() to write the formatted name to the given stream.
+ SymbolNode *parse(StringView &MangledName);
+
+ TagTypeNode *parseTagUniqueName(StringView &MangledName);
+
+ // True if an error occurred.
+ bool Error = false;
+
+ void dumpBackReferences();
+
+private:
+ SymbolNode *demangleEncodedSymbol(StringView &MangledName,
+ QualifiedNameNode *QN);
+
+ VariableSymbolNode *demangleVariableEncoding(StringView &MangledName,
+ StorageClass SC);
+ FunctionSymbolNode *demangleFunctionEncoding(StringView &MangledName);
+
+ Qualifiers demanglePointerExtQualifiers(StringView &MangledName);
+
+ // Parser functions. This is a recursive-descent parser.
+ TypeNode *demangleType(StringView &MangledName, QualifierMangleMode QMM);
+ PrimitiveTypeNode *demanglePrimitiveType(StringView &MangledName);
+ CustomTypeNode *demangleCustomType(StringView &MangledName);
+ TagTypeNode *demangleClassType(StringView &MangledName);
+ PointerTypeNode *demanglePointerType(StringView &MangledName);
+ PointerTypeNode *demangleMemberPointerType(StringView &MangledName);
+ FunctionSignatureNode *demangleFunctionType(StringView &MangledName,
+ bool HasThisQuals);
+
+ ArrayTypeNode *demangleArrayType(StringView &MangledName);
+
+ NodeArrayNode *demangleTemplateParameterList(StringView &MangledName);
+ NodeArrayNode *demangleFunctionParameterList(StringView &MangledName);
+
+ std::pair<uint64_t, bool> demangleNumber(StringView &MangledName);
+ uint64_t demangleUnsigned(StringView &MangledName);
+ int64_t demangleSigned(StringView &MangledName);
+
+ void memorizeString(StringView s);
+ void memorizeIdentifier(IdentifierNode *Identifier);
+
+ /// Allocate a copy of \p Borrowed into memory that we own.
+ StringView copyString(StringView Borrowed);
+
+ QualifiedNameNode *demangleFullyQualifiedTypeName(StringView &MangledName);
+ QualifiedNameNode *demangleFullyQualifiedSymbolName(StringView &MangledName);
+
+ IdentifierNode *demangleUnqualifiedTypeName(StringView &MangledName,
+ bool Memorize);
+ IdentifierNode *demangleUnqualifiedSymbolName(StringView &MangledName,
+ NameBackrefBehavior NBB);
+
+ QualifiedNameNode *demangleNameScopeChain(StringView &MangledName,
+ IdentifierNode *UnqualifiedName);
+ IdentifierNode *demangleNameScopePiece(StringView &MangledName);
+
+ NamedIdentifierNode *demangleBackRefName(StringView &MangledName);
+ IdentifierNode *demangleTemplateInstantiationName(StringView &MangledName,
+ NameBackrefBehavior NBB);
+ IdentifierNode *demangleFunctionIdentifierCode(StringView &MangledName);
+ IdentifierNode *
+ demangleFunctionIdentifierCode(StringView &MangledName,
+ FunctionIdentifierCodeGroup Group);
+ StructorIdentifierNode *demangleStructorIdentifier(StringView &MangledName,
+ bool IsDestructor);
+ ConversionOperatorIdentifierNode *
+ demangleConversionOperatorIdentifier(StringView &MangledName);
+ LiteralOperatorIdentifierNode *
+ demangleLiteralOperatorIdentifier(StringView &MangledName);
+
+ SymbolNode *demangleSpecialIntrinsic(StringView &MangledName);
+ SpecialTableSymbolNode *
+ demangleSpecialTableSymbolNode(StringView &MangledName,
+ SpecialIntrinsicKind SIK);
+ LocalStaticGuardVariableNode *
+ demangleLocalStaticGuard(StringView &MangledName);
+ VariableSymbolNode *demangleUntypedVariable(ArenaAllocator &Arena,
+ StringView &MangledName,
+ StringView VariableName);
+ VariableSymbolNode *
+ demangleRttiBaseClassDescriptorNode(ArenaAllocator &Arena,
+ StringView &MangledName);
+ FunctionSymbolNode *demangleInitFiniStub(StringView &MangledName,
+ bool IsDestructor);
+
+ NamedIdentifierNode *demangleSimpleName(StringView &MangledName,
+ bool Memorize);
+ NamedIdentifierNode *demangleAnonymousNamespaceName(StringView &MangledName);
+ NamedIdentifierNode *demangleLocallyScopedNamePiece(StringView &MangledName);
+ EncodedStringLiteralNode *demangleStringLiteral(StringView &MangledName);
+ FunctionSymbolNode *demangleVcallThunkNode(StringView &MangledName);
+
+ StringView demangleSimpleString(StringView &MangledName, bool Memorize);
+
+ FuncClass demangleFunctionClass(StringView &MangledName);
+ CallingConv demangleCallingConvention(StringView &MangledName);
+ StorageClass demangleVariableStorageClass(StringView &MangledName);
+ bool demangleThrowSpecification(StringView &MangledName);
+ wchar_t demangleWcharLiteral(StringView &MangledName);
+ uint8_t demangleCharLiteral(StringView &MangledName);
+
+ std::pair<Qualifiers, bool> demangleQualifiers(StringView &MangledName);
+
+ // Memory allocator.
+ ArenaAllocator Arena;
+
+ // A single type uses one global back-ref table for all function params.
+ // This means back-refs can even go "into" other types. Examples:
+ //
+ // // Second int* is a back-ref to first.
+ // void foo(int *, int*);
+ //
+ // // Second int* is not a back-ref to first (first is not a function param).
+ // int* foo(int*);
+ //
+  // // Second int* is a back-ref to first (ALL function types share the same
+  // // back-ref map.)
+ // using F = void(*)(int*);
+ // F G(int *);
+ BackrefContext Backrefs;
+};
+
+} // namespace ms_demangle
+} // namespace llvm
+
+#endif // LLVM_DEMANGLE_MICROSOFT_DEMANGLE_H
diff --git a/contrib/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h b/contrib/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
new file mode 100644
index 000000000000..9e3478e9fd29
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
@@ -0,0 +1,605 @@
+#ifndef LLVM_SUPPORT_MICROSOFTDEMANGLENODES_H
+#define LLVM_SUPPORT_MICROSOFTDEMANGLENODES_H
+
+#include "llvm/Demangle/Compiler.h"
+#include "llvm/Demangle/StringView.h"
+#include <array>
+
+class OutputStream;
+
+namespace llvm {
+namespace ms_demangle {
+
+// Storage classes
+enum Qualifiers : uint8_t {
+ Q_None = 0,
+ Q_Const = 1 << 0,
+ Q_Volatile = 1 << 1,
+ Q_Far = 1 << 2,
+ Q_Huge = 1 << 3,
+ Q_Unaligned = 1 << 4,
+ Q_Restrict = 1 << 5,
+ Q_Pointer64 = 1 << 6
+};
+
+enum class StorageClass : uint8_t {
+ None,
+ PrivateStatic,
+ ProtectedStatic,
+ PublicStatic,
+ Global,
+ FunctionLocalStatic,
+};
+
+enum class PointerAffinity { None, Pointer, Reference, RValueReference };
+enum class FunctionRefQualifier { None, Reference, RValueReference };
+
+// Calling conventions
+enum class CallingConv : uint8_t {
+ None,
+ Cdecl,
+ Pascal,
+ Thiscall,
+ Stdcall,
+ Fastcall,
+ Clrcall,
+ Eabi,
+ Vectorcall,
+ Regcall,
+};
+
+enum class ReferenceKind : uint8_t { None, LValueRef, RValueRef };
+
+enum OutputFlags {
+ OF_Default = 0,
+ OF_NoCallingConvention = 1,
+ OF_NoTagSpecifier = 2,
+};
+
+// Types
+enum class PrimitiveKind {
+ Void,
+ Bool,
+ Char,
+ Schar,
+ Uchar,
+ Char16,
+ Char32,
+ Short,
+ Ushort,
+ Int,
+ Uint,
+ Long,
+ Ulong,
+ Int64,
+ Uint64,
+ Wchar,
+ Float,
+ Double,
+ Ldouble,
+ Nullptr,
+};
+
+enum class CharKind {
+ Char,
+ Char16,
+ Char32,
+ Wchar,
+};
+
+enum class IntrinsicFunctionKind : uint8_t {
+ None,
+ New, // ?2 # operator new
+ Delete, // ?3 # operator delete
+ Assign, // ?4 # operator=
+ RightShift, // ?5 # operator>>
+ LeftShift, // ?6 # operator<<
+ LogicalNot, // ?7 # operator!
+ Equals, // ?8 # operator==
+ NotEquals, // ?9 # operator!=
+ ArraySubscript, // ?A # operator[]
+ Pointer, // ?C # operator->
+ Dereference, // ?D # operator*
+ Increment, // ?E # operator++
+ Decrement, // ?F # operator--
+ Minus, // ?G # operator-
+ Plus, // ?H # operator+
+ BitwiseAnd, // ?I # operator&
+ MemberPointer, // ?J # operator->*
+ Divide, // ?K # operator/
+ Modulus, // ?L # operator%
+ LessThan, // ?M operator<
+ LessThanEqual, // ?N operator<=
+ GreaterThan, // ?O operator>
+ GreaterThanEqual, // ?P operator>=
+ Comma, // ?Q operator,
+ Parens, // ?R operator()
+ BitwiseNot, // ?S operator~
+ BitwiseXor, // ?T operator^
+ BitwiseOr, // ?U operator|
+ LogicalAnd, // ?V operator&&
+ LogicalOr, // ?W operator||
+ TimesEqual, // ?X operator*=
+ PlusEqual, // ?Y operator+=
+ MinusEqual, // ?Z operator-=
+ DivEqual, // ?_0 operator/=
+ ModEqual, // ?_1 operator%=
+ RshEqual, // ?_2 operator>>=
+ LshEqual, // ?_3 operator<<=
+ BitwiseAndEqual, // ?_4 operator&=
+ BitwiseOrEqual, // ?_5 operator|=
+ BitwiseXorEqual, // ?_6 operator^=
+ VbaseDtor, // ?_D # vbase destructor
+ VecDelDtor, // ?_E # vector deleting destructor
+ DefaultCtorClosure, // ?_F # default constructor closure
+ ScalarDelDtor, // ?_G # scalar deleting destructor
+ VecCtorIter, // ?_H # vector constructor iterator
+ VecDtorIter, // ?_I # vector destructor iterator
+ VecVbaseCtorIter, // ?_J # vector vbase constructor iterator
+ VdispMap, // ?_K # virtual displacement map
+ EHVecCtorIter, // ?_L # eh vector constructor iterator
+ EHVecDtorIter, // ?_M # eh vector destructor iterator
+ EHVecVbaseCtorIter, // ?_N # eh vector vbase constructor iterator
+ CopyCtorClosure, // ?_O # copy constructor closure
+ LocalVftableCtorClosure, // ?_T # local vftable constructor closure
+ ArrayNew, // ?_U operator new[]
+ ArrayDelete, // ?_V operator delete[]
+ ManVectorCtorIter, // ?__A managed vector ctor iterator
+ ManVectorDtorIter, // ?__B managed vector dtor iterator
+ EHVectorCopyCtorIter, // ?__C EH vector copy ctor iterator
+ EHVectorVbaseCopyCtorIter, // ?__D EH vector vbase copy ctor iterator
+ VectorCopyCtorIter, // ?__G vector copy constructor iterator
+ VectorVbaseCopyCtorIter, // ?__H vector vbase copy constructor iterator
+ ManVectorVbaseCopyCtorIter, // ?__I managed vector vbase copy constructor
+ CoAwait, // ?__L co_await
+ Spaceship, // operator<=>
+ MaxIntrinsic
+};
+
+enum class SpecialIntrinsicKind {
+ None,
+ Vftable,
+ Vbtable,
+ Typeof,
+ VcallThunk,
+ LocalStaticGuard,
+ StringLiteralSymbol,
+ UdtReturning,
+ Unknown,
+ DynamicInitializer,
+ DynamicAtexitDestructor,
+ RttiTypeDescriptor,
+ RttiBaseClassDescriptor,
+ RttiBaseClassArray,
+ RttiClassHierarchyDescriptor,
+ RttiCompleteObjLocator,
+ LocalVftable,
+ LocalStaticThreadGuard,
+};
+
+// Function classes
+enum FuncClass : uint16_t {
+ FC_None = 0,
+ FC_Public = 1 << 0,
+ FC_Protected = 1 << 1,
+ FC_Private = 1 << 2,
+ FC_Global = 1 << 3,
+ FC_Static = 1 << 4,
+ FC_Virtual = 1 << 5,
+ FC_Far = 1 << 6,
+ FC_ExternC = 1 << 7,
+ FC_NoParameterList = 1 << 8,
+ FC_VirtualThisAdjust = 1 << 9,
+ FC_VirtualThisAdjustEx = 1 << 10,
+ FC_StaticThisAdjust = 1 << 11,
+};
+
+enum class TagKind { Class, Struct, Union, Enum };
+
+enum class NodeKind {
+ Unknown,
+ Md5Symbol,
+ PrimitiveType,
+ FunctionSignature,
+ Identifier,
+ NamedIdentifier,
+ VcallThunkIdentifier,
+ LocalStaticGuardIdentifier,
+ IntrinsicFunctionIdentifier,
+ ConversionOperatorIdentifier,
+ DynamicStructorIdentifier,
+ StructorIdentifier,
+ LiteralOperatorIdentifier,
+ ThunkSignature,
+ PointerType,
+ TagType,
+ ArrayType,
+ Custom,
+ IntrinsicType,
+ NodeArray,
+ QualifiedName,
+ TemplateParameterReference,
+ EncodedStringLiteral,
+ IntegerLiteral,
+ RttiBaseClassDescriptor,
+ LocalStaticGuardVariable,
+ FunctionSymbol,
+ VariableSymbol,
+ SpecialTableSymbol
+};
+
+struct Node {
+ explicit Node(NodeKind K) : Kind(K) {}
+ virtual ~Node() = default;
+
+ NodeKind kind() const { return Kind; }
+
+ virtual void output(OutputStream &OS, OutputFlags Flags) const = 0;
+
+ std::string toString(OutputFlags Flags = OF_Default) const;
+
+private:
+ NodeKind Kind;
+};
+
+struct TypeNode;
+struct PrimitiveTypeNode;
+struct FunctionSignatureNode;
+struct IdentifierNode;
+struct NamedIdentifierNode;
+struct VcallThunkIdentifierNode;
+struct IntrinsicFunctionIdentifierNode;
+struct LiteralOperatorIdentifierNode;
+struct ConversionOperatorIdentifierNode;
+struct StructorIdentifierNode;
+struct ThunkSignatureNode;
+struct PointerTypeNode;
+struct ArrayTypeNode;
+struct CustomNode;
+struct TagTypeNode;
+struct IntrinsicTypeNode;
+struct NodeArrayNode;
+struct QualifiedNameNode;
+struct TemplateParameterReferenceNode;
+struct EncodedStringLiteralNode;
+struct IntegerLiteralNode;
+struct RttiBaseClassDescriptorNode;
+struct LocalStaticGuardVariableNode;
+struct SymbolNode;
+struct FunctionSymbolNode;
+struct VariableSymbolNode;
+struct SpecialTableSymbolNode;
+
+struct TypeNode : public Node {
+ explicit TypeNode(NodeKind K) : Node(K) {}
+
+ virtual void outputPre(OutputStream &OS, OutputFlags Flags) const = 0;
+ virtual void outputPost(OutputStream &OS, OutputFlags Flags) const = 0;
+
+ void output(OutputStream &OS, OutputFlags Flags) const override {
+ outputPre(OS, Flags);
+ outputPost(OS, Flags);
+ }
+
+ void outputQuals(bool SpaceBefore, bool SpaceAfter) const;
+
+ Qualifiers Quals = Q_None;
+};
+
+struct PrimitiveTypeNode : public TypeNode {
+ explicit PrimitiveTypeNode(PrimitiveKind K)
+ : TypeNode(NodeKind::PrimitiveType), PrimKind(K) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const {}
+
+ PrimitiveKind PrimKind;
+};
+
+struct FunctionSignatureNode : public TypeNode {
+ explicit FunctionSignatureNode(NodeKind K) : TypeNode(K) {}
+ FunctionSignatureNode() : TypeNode(NodeKind::FunctionSignature) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const override;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const override;
+
+ // Valid if this FunctionTypeNode is the Pointee of a PointerType or
+ // MemberPointerType.
+ PointerAffinity Affinity = PointerAffinity::None;
+
+ // The function's calling convention.
+ CallingConv CallConvention = CallingConv::None;
+
+  // Function flags (global, public, etc.)
+ FuncClass FunctionClass = FC_Global;
+
+ FunctionRefQualifier RefQualifier = FunctionRefQualifier::None;
+
+ // The return type of the function.
+ TypeNode *ReturnType = nullptr;
+
+ // True if this is a C-style ... varargs function.
+ bool IsVariadic = false;
+
+ // Function parameters
+ NodeArrayNode *Params = nullptr;
+
+ // True if the function type is noexcept
+ bool IsNoexcept = false;
+};
+
+struct IdentifierNode : public Node {
+ explicit IdentifierNode(NodeKind K) : Node(K) {}
+
+ NodeArrayNode *TemplateParams = nullptr;
+
+protected:
+ void outputTemplateParameters(OutputStream &OS, OutputFlags Flags) const;
+};
+
+struct VcallThunkIdentifierNode : public IdentifierNode {
+ VcallThunkIdentifierNode() : IdentifierNode(NodeKind::VcallThunkIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ uint64_t OffsetInVTable = 0;
+};
+
+struct DynamicStructorIdentifierNode : public IdentifierNode {
+ DynamicStructorIdentifierNode()
+ : IdentifierNode(NodeKind::DynamicStructorIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ VariableSymbolNode *Variable = nullptr;
+ QualifiedNameNode *Name = nullptr;
+ bool IsDestructor = false;
+};
+
+struct NamedIdentifierNode : public IdentifierNode {
+ NamedIdentifierNode() : IdentifierNode(NodeKind::NamedIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ StringView Name;
+};
+
+struct IntrinsicFunctionIdentifierNode : public IdentifierNode {
+ explicit IntrinsicFunctionIdentifierNode(IntrinsicFunctionKind Operator)
+ : IdentifierNode(NodeKind::IntrinsicFunctionIdentifier),
+ Operator(Operator) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ IntrinsicFunctionKind Operator;
+};
+
+struct LiteralOperatorIdentifierNode : public IdentifierNode {
+ LiteralOperatorIdentifierNode()
+ : IdentifierNode(NodeKind::LiteralOperatorIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ StringView Name;
+};
+
+struct LocalStaticGuardIdentifierNode : public IdentifierNode {
+ LocalStaticGuardIdentifierNode()
+ : IdentifierNode(NodeKind::LocalStaticGuardIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ uint32_t ScopeIndex = 0;
+};
+
+struct ConversionOperatorIdentifierNode : public IdentifierNode {
+ ConversionOperatorIdentifierNode()
+ : IdentifierNode(NodeKind::ConversionOperatorIdentifier) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+  // The type that this operator converts to.
+ TypeNode *TargetType = nullptr;
+};
+
+struct StructorIdentifierNode : public IdentifierNode {
+ StructorIdentifierNode() : IdentifierNode(NodeKind::StructorIdentifier) {}
+ explicit StructorIdentifierNode(bool IsDestructor)
+ : IdentifierNode(NodeKind::StructorIdentifier),
+ IsDestructor(IsDestructor) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ // The name of the class that this is a structor of.
+ IdentifierNode *Class = nullptr;
+ bool IsDestructor = false;
+};
+
+struct ThunkSignatureNode : public FunctionSignatureNode {
+ ThunkSignatureNode() : FunctionSignatureNode(NodeKind::ThunkSignature) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const override;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const override;
+
+ struct ThisAdjustor {
+ uint32_t StaticOffset = 0;
+ int32_t VBPtrOffset = 0;
+ int32_t VBOffsetOffset = 0;
+ int32_t VtordispOffset = 0;
+ };
+
+ ThisAdjustor ThisAdjust;
+};
+
+struct PointerTypeNode : public TypeNode {
+ PointerTypeNode() : TypeNode(NodeKind::PointerType) {}
+ void outputPre(OutputStream &OS, OutputFlags Flags) const override;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const override;
+
+ // Is this a pointer, reference, or rvalue-reference?
+ PointerAffinity Affinity = PointerAffinity::None;
+
+ // If this is a member pointer, this is the class that the member is in.
+ QualifiedNameNode *ClassParent = nullptr;
+
+ // Represents a type X in "a pointer to X", "a reference to X", or
+ // "rvalue-reference to X"
+ TypeNode *Pointee = nullptr;
+};
+
+struct TagTypeNode : public TypeNode {
+ explicit TagTypeNode(TagKind Tag) : TypeNode(NodeKind::TagType), Tag(Tag) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const;
+
+ QualifiedNameNode *QualifiedName = nullptr;
+ TagKind Tag;
+};
+
+struct ArrayTypeNode : public TypeNode {
+ ArrayTypeNode() : TypeNode(NodeKind::ArrayType) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const;
+
+ void outputDimensionsImpl(OutputStream &OS, OutputFlags Flags) const;
+ void outputOneDimension(OutputStream &OS, OutputFlags Flags, Node *N) const;
+
+ // A list of array dimensions. e.g. [3,4,5] in `int Foo[3][4][5]`
+ NodeArrayNode *Dimensions = nullptr;
+
+ // The type of array element.
+ TypeNode *ElementType = nullptr;
+};
+
+struct IntrinsicNode : public TypeNode {
+ IntrinsicNode() : TypeNode(NodeKind::IntrinsicType) {}
+ void output(OutputStream &OS, OutputFlags Flags) const override {}
+};
+
+struct CustomTypeNode : public TypeNode {
+ CustomTypeNode() : TypeNode(NodeKind::Custom) {}
+
+ void outputPre(OutputStream &OS, OutputFlags Flags) const override;
+ void outputPost(OutputStream &OS, OutputFlags Flags) const override;
+
+ IdentifierNode *Identifier;
+};
+
+struct NodeArrayNode : public Node {
+ NodeArrayNode() : Node(NodeKind::NodeArray) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ void output(OutputStream &OS, OutputFlags Flags, StringView Separator) const;
+
+ Node **Nodes = 0;
+ size_t Count = 0;
+};
+
+struct QualifiedNameNode : public Node {
+ QualifiedNameNode() : Node(NodeKind::QualifiedName) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ NodeArrayNode *Components = nullptr;
+
+ IdentifierNode *getUnqualifiedIdentifier() {
+ Node *LastComponent = Components->Nodes[Components->Count - 1];
+ return static_cast<IdentifierNode *>(LastComponent);
+ }
+};
+
+struct TemplateParameterReferenceNode : public Node {
+ TemplateParameterReferenceNode()
+ : Node(NodeKind::TemplateParameterReference) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ SymbolNode *Symbol = nullptr;
+
+ int ThunkOffsetCount = 0;
+ std::array<int64_t, 3> ThunkOffsets;
+ PointerAffinity Affinity = PointerAffinity::None;
+ bool IsMemberPointer = false;
+};
+
+struct IntegerLiteralNode : public Node {
+ IntegerLiteralNode() : Node(NodeKind::IntegerLiteral) {}
+ IntegerLiteralNode(uint64_t Value, bool IsNegative)
+ : Node(NodeKind::IntegerLiteral), Value(Value), IsNegative(IsNegative) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ uint64_t Value = 0;
+ bool IsNegative = false;
+};
+
+struct RttiBaseClassDescriptorNode : public IdentifierNode {
+ RttiBaseClassDescriptorNode()
+ : IdentifierNode(NodeKind::RttiBaseClassDescriptor) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ uint32_t NVOffset = 0;
+ int32_t VBPtrOffset = 0;
+ uint32_t VBTableOffset = 0;
+ uint32_t Flags = 0;
+};
+
+struct SymbolNode : public Node {
+ explicit SymbolNode(NodeKind K) : Node(K) {}
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+ QualifiedNameNode *Name = nullptr;
+};
+
+struct SpecialTableSymbolNode : public SymbolNode {
+ explicit SpecialTableSymbolNode()
+ : SymbolNode(NodeKind::SpecialTableSymbol) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+ QualifiedNameNode *TargetName = nullptr;
+ Qualifiers Quals;
+};
+
+struct LocalStaticGuardVariableNode : public SymbolNode {
+ LocalStaticGuardVariableNode()
+ : SymbolNode(NodeKind::LocalStaticGuardVariable) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ bool IsVisible = false;
+};
+
+struct EncodedStringLiteralNode : public SymbolNode {
+ EncodedStringLiteralNode() : SymbolNode(NodeKind::EncodedStringLiteral) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ StringView DecodedString;
+ bool IsTruncated = false;
+ CharKind Char = CharKind::Char;
+};
+
+struct VariableSymbolNode : public SymbolNode {
+ VariableSymbolNode() : SymbolNode(NodeKind::VariableSymbol) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ StorageClass SC = StorageClass::None;
+ TypeNode *Type = nullptr;
+};
+
+struct FunctionSymbolNode : public SymbolNode {
+ FunctionSymbolNode() : SymbolNode(NodeKind::FunctionSymbol) {}
+
+ void output(OutputStream &OS, OutputFlags Flags) const override;
+
+ FunctionSignatureNode *Signature = nullptr;
+};
+
+} // namespace ms_demangle
+} // namespace llvm
+
+#endif \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/Demangle/StringView.h b/contrib/llvm/include/llvm/Demangle/StringView.h
new file mode 100644
index 000000000000..a89deda694c2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/StringView.h
@@ -0,0 +1,121 @@
+//===--- StringView.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+// This file contains a limited version of LLVM's StringView class. It is
+// copied here so that LLVMDemangle need not take a dependency on LLVMSupport.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_STRINGVIEW_H
+#define LLVM_DEMANGLE_STRINGVIEW_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+
+class StringView {
+ const char *First;
+ const char *Last;
+
+public:
+ static const size_t npos = ~size_t(0);
+
+ template <size_t N>
+ StringView(const char (&Str)[N]) : First(Str), Last(Str + N - 1) {}
+ StringView(const char *First_, const char *Last_)
+ : First(First_), Last(Last_) {}
+ StringView(const char *First_, size_t Len)
+ : First(First_), Last(First_ + Len) {}
+ StringView(const char *Str) : First(Str), Last(Str + std::strlen(Str)) {}
+ StringView() : First(nullptr), Last(nullptr) {}
+
+ StringView substr(size_t From) const {
+ return StringView(begin() + From, size() - From);
+ }
+
+ size_t find(char C, size_t From = 0) const {
+ size_t FindBegin = std::min(From, size());
+ // Avoid calling memchr with nullptr.
+ if (FindBegin < size()) {
+ // Just forward to memchr, which is faster than a hand-rolled loop.
+ if (const void *P = ::memchr(First + FindBegin, C, size() - FindBegin))
+ return static_cast<const char *>(P) - First;
+ }
+ return npos;
+ }
+
+ StringView substr(size_t From, size_t To) const {
+ if (To >= size())
+ To = size() - 1;
+ if (From >= size())
+ From = size() - 1;
+ return StringView(First + From, First + To);
+ }
+
+ StringView dropFront(size_t N = 1) const {
+ if (N >= size())
+ N = size();
+ return StringView(First + N, Last);
+ }
+
+ StringView dropBack(size_t N = 1) const {
+ if (N >= size())
+ N = size();
+ return StringView(First, Last - N);
+ }
+
+ char front() const {
+ assert(!empty());
+ return *begin();
+ }
+
+ char back() const {
+ assert(!empty());
+ return *(end() - 1);
+ }
+
+ char popFront() {
+ assert(!empty());
+ return *First++;
+ }
+
+ bool consumeFront(char C) {
+ if (!startsWith(C))
+ return false;
+ *this = dropFront(1);
+ return true;
+ }
+
+ bool consumeFront(StringView S) {
+ if (!startsWith(S))
+ return false;
+ *this = dropFront(S.size());
+ return true;
+ }
+
+ bool startsWith(char C) const { return !empty() && *begin() == C; }
+
+ bool startsWith(StringView Str) const {
+ if (Str.size() > size())
+ return false;
+ return std::equal(Str.begin(), Str.end(), begin());
+ }
+
+ const char &operator[](size_t Idx) const { return *(begin() + Idx); }
+
+ const char *begin() const { return First; }
+ const char *end() const { return Last; }
+ size_t size() const { return static_cast<size_t>(Last - First); }
+ bool empty() const { return First == Last; }
+};
+
+inline bool operator==(const StringView &LHS, const StringView &RHS) {
+ return LHS.size() == RHS.size() &&
+ std::equal(LHS.begin(), LHS.end(), RHS.begin());
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Demangle/Utility.h b/contrib/llvm/include/llvm/Demangle/Utility.h
new file mode 100644
index 000000000000..1d1601c81635
--- /dev/null
+++ b/contrib/llvm/include/llvm/Demangle/Utility.h
@@ -0,0 +1,187 @@
+//===--- Utility.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+// This file contains several utility classes used by the demangle library.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEMANGLE_UTILITY_H
+#define LLVM_DEMANGLE_UTILITY_H
+
+#include "StringView.h"
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <limits>
+
+// Stream that AST nodes write their string representation into after the AST
+// has been parsed.
+class OutputStream {
+ char *Buffer;
+ size_t CurrentPosition;
+ size_t BufferCapacity;
+
+  // Ensure there are at least N more positions in the buffer.
+ void grow(size_t N) {
+ if (N + CurrentPosition >= BufferCapacity) {
+ BufferCapacity *= 2;
+ if (BufferCapacity < N + CurrentPosition)
+ BufferCapacity = N + CurrentPosition;
+ Buffer = static_cast<char *>(std::realloc(Buffer, BufferCapacity));
+ if (Buffer == nullptr)
+ std::terminate();
+ }
+ }
+
+ void writeUnsigned(uint64_t N, bool isNeg = false) {
+ // Handle special case...
+ if (N == 0) {
+ *this << '0';
+ return;
+ }
+
+ char Temp[21];
+ char *TempPtr = std::end(Temp);
+
+ while (N) {
+ *--TempPtr = '0' + char(N % 10);
+ N /= 10;
+ }
+
+ // Add negative sign...
+ if (isNeg)
+ *--TempPtr = '-';
+ this->operator<<(StringView(TempPtr, std::end(Temp)));
+ }
+
+public:
+ OutputStream(char *StartBuf, size_t Size)
+ : Buffer(StartBuf), CurrentPosition(0), BufferCapacity(Size) {}
+ OutputStream() = default;
+ void reset(char *Buffer_, size_t BufferCapacity_) {
+ CurrentPosition = 0;
+ Buffer = Buffer_;
+ BufferCapacity = BufferCapacity_;
+ }
+
+ /// If a ParameterPackExpansion (or similar type) is encountered, the offset
+ /// into the pack that we're currently printing.
+ unsigned CurrentPackIndex = std::numeric_limits<unsigned>::max();
+ unsigned CurrentPackMax = std::numeric_limits<unsigned>::max();
+
+ OutputStream &operator+=(StringView R) {
+ size_t Size = R.size();
+ if (Size == 0)
+ return *this;
+ grow(Size);
+ std::memmove(Buffer + CurrentPosition, R.begin(), Size);
+ CurrentPosition += Size;
+ return *this;
+ }
+
+ OutputStream &operator+=(char C) {
+ grow(1);
+ Buffer[CurrentPosition++] = C;
+ return *this;
+ }
+
+ OutputStream &operator<<(StringView R) { return (*this += R); }
+
+ OutputStream &operator<<(char C) { return (*this += C); }
+
+ OutputStream &operator<<(long long N) {
+ if (N < 0)
+ writeUnsigned(static_cast<unsigned long long>(-N), true);
+ else
+ writeUnsigned(static_cast<unsigned long long>(N));
+ return *this;
+ }
+
+ OutputStream &operator<<(unsigned long long N) {
+ writeUnsigned(N, false);
+ return *this;
+ }
+
+ OutputStream &operator<<(long N) {
+ return this->operator<<(static_cast<long long>(N));
+ }
+
+ OutputStream &operator<<(unsigned long N) {
+ return this->operator<<(static_cast<unsigned long long>(N));
+ }
+
+ OutputStream &operator<<(int N) {
+ return this->operator<<(static_cast<long long>(N));
+ }
+
+ OutputStream &operator<<(unsigned int N) {
+ return this->operator<<(static_cast<unsigned long long>(N));
+ }
+
+ size_t getCurrentPosition() const { return CurrentPosition; }
+ void setCurrentPosition(size_t NewPos) { CurrentPosition = NewPos; }
+
+ char back() const {
+ return CurrentPosition ? Buffer[CurrentPosition - 1] : '\0';
+ }
+
+ bool empty() const { return CurrentPosition == 0; }
+
+ char *getBuffer() { return Buffer; }
+ char *getBufferEnd() { return Buffer + CurrentPosition - 1; }
+ size_t getBufferCapacity() { return BufferCapacity; }
+};
+
+template <class T> class SwapAndRestore {
+ T &Restore;
+ T OriginalValue;
+ bool ShouldRestore = true;
+
+public:
+ SwapAndRestore(T &Restore_) : SwapAndRestore(Restore_, Restore_) {}
+
+ SwapAndRestore(T &Restore_, T NewVal)
+ : Restore(Restore_), OriginalValue(Restore) {
+ Restore = std::move(NewVal);
+ }
+ ~SwapAndRestore() {
+ if (ShouldRestore)
+ Restore = std::move(OriginalValue);
+ }
+
+ void shouldRestore(bool ShouldRestore_) { ShouldRestore = ShouldRestore_; }
+
+ void restoreNow(bool Force) {
+ if (!Force && !ShouldRestore)
+ return;
+
+ Restore = std::move(OriginalValue);
+ ShouldRestore = false;
+ }
+
+ SwapAndRestore(const SwapAndRestore &) = delete;
+ SwapAndRestore &operator=(const SwapAndRestore &) = delete;
+};
+
+inline bool initializeOutputStream(char *Buf, size_t *N, OutputStream &S,
+ size_t InitSize) {
+ size_t BufferSize;
+ if (Buf == nullptr) {
+ Buf = static_cast<char *>(std::malloc(InitSize));
+ if (Buf == nullptr)
+ return false;
+ BufferSize = InitSize;
+ } else
+ BufferSize = *N;
+
+ S.reset(Buf, BufferSize);
+ return true;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
index 1ce772ccde95..1b08379b8c3b 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -35,25 +35,6 @@ class ObjectFile;
} // end namespace object
-/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
-/// about a generated machine code function.
-struct JITEvent_EmittedFunctionDetails {
- struct LineStart {
- /// The address at which the current line changes.
- uintptr_t Address;
-
- /// The new location information. These can be translated to DebugLocTuples
- /// using MF->getDebugLocTuple().
- DebugLoc Loc;
- };
-
- /// The machine function the struct contains information for.
- const MachineFunction *MF;
-
- /// The list of line boundary information, sorted by address.
- std::vector<LineStart> LineStarts;
-};
-
/// JITEventListener - Abstract interface for use by the JIT to notify clients
/// about significant events during compilation. For example, to notify
/// profilers and debuggers that need to know where functions have been emitted.
@@ -61,26 +42,26 @@ struct JITEvent_EmittedFunctionDetails {
/// The default implementation of each method does nothing.
class JITEventListener {
public:
- using EmittedFunctionDetails = JITEvent_EmittedFunctionDetails;
+ using ObjectKey = uint64_t;
-public:
JITEventListener() = default;
virtual ~JITEventListener() = default;
- /// NotifyObjectEmitted - Called after an object has been successfully
- /// emitted to memory. NotifyFunctionEmitted will not be called for
+ /// notifyObjectLoaded - Called after an object has had its sections allocated
+ /// and addresses assigned to all symbols. Note: Section memory will not have
+ /// been relocated yet. notifyFunctionLoaded will not be called for
/// individual functions in the object.
///
/// ELF-specific information
/// The ObjectImage contains the generated object image
/// with section headers updated to reflect the address at which sections
/// were loaded and with relocations performed in-place on debug sections.
- virtual void NotifyObjectEmitted(const object::ObjectFile &Obj,
- const RuntimeDyld::LoadedObjectInfo &L) {}
+ virtual void notifyObjectLoaded(ObjectKey K, const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {}
- /// NotifyFreeingObject - Called just before the memory associated with
+ /// notifyFreeingObject - Called just before the memory associated with
/// a previously emitted object is released.
- virtual void NotifyFreeingObject(const object::ObjectFile &Obj) {}
+ virtual void notifyFreeingObject(ObjectKey K) {}
  // Get a pointer to the GDB debugger registration listener.
static JITEventListener *createGDBRegistrationListener();
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITSymbol.h b/contrib/llvm/include/llvm/ExecutionEngine/JITSymbol.h
index 53037c3dbc72..05c9590726df 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITSymbol.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITSymbol.h
@@ -23,6 +23,7 @@
#include <set>
#include <string>
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
@@ -32,13 +33,25 @@ class GlobalValue;
namespace object {
-class BasicSymbolRef;
+class SymbolRef;
} // end namespace object
/// Represents an address in the target process's address space.
using JITTargetAddress = uint64_t;
+/// Convert a JITTargetAddress to a pointer.
+template <typename T> T jitTargetAddressToPointer(JITTargetAddress Addr) {
+ static_assert(std::is_pointer<T>::value, "T must be a pointer type");
+ uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
+ assert(IntPtr == Addr && "JITTargetAddress value out of range for uintptr_t");
+ return reinterpret_cast<T>(IntPtr);
+}
+
+template <typename T> JITTargetAddress pointerToJITTargetAddress(T *Ptr) {
+ return static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(Ptr));
+}
+
/// Flags for symbols in the JIT.
class JITSymbolFlags {
public:
@@ -52,8 +65,10 @@ public:
Common = 1U << 2,
Absolute = 1U << 3,
Exported = 1U << 4,
- Lazy = 1U << 5,
- Materializing = 1U << 6
+ Callable = 1U << 5,
+ Lazy = 1U << 6,
+ Materializing = 1U << 7,
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Materializing)
};
static JITSymbolFlags stripTransientFlags(JITSymbolFlags Orig) {
@@ -71,6 +86,26 @@ public:
JITSymbolFlags(FlagNames Flags, TargetFlagsType TargetFlags)
: Flags(Flags), TargetFlags(TargetFlags) {}
+  /// Implicitly convert to bool. Returns true if any flag is set.
+ explicit operator bool() const { return Flags != None || TargetFlags != 0; }
+
+ /// Compare for equality.
+ bool operator==(const JITSymbolFlags &RHS) const {
+ return Flags == RHS.Flags && TargetFlags == RHS.TargetFlags;
+ }
+
+ /// Bitwise AND-assignment for FlagNames.
+ JITSymbolFlags &operator&=(const FlagNames &RHS) {
+ Flags &= RHS;
+ return *this;
+ }
+
+ /// Bitwise OR-assignment for FlagNames.
+ JITSymbolFlags &operator|=(const FlagNames &RHS) {
+ Flags |= RHS;
+ return *this;
+ }
+
/// Return true if there was an error retrieving this symbol.
bool hasError() const {
return (Flags & HasError) == HasError;
@@ -109,11 +144,13 @@ public:
return (Flags & Exported) == Exported;
}
- /// Implicitly convert to the underlying flags type.
- operator UnderlyingType&() { return Flags; }
+ /// Returns true if the given symbol is known to be callable.
+ bool isCallable() const { return (Flags & Callable) == Callable; }
- /// Implicitly convert to the underlying flags type.
- operator const UnderlyingType&() const { return Flags; }
+ /// Get the underlying flags value as an integer.
+ UnderlyingType getRawFlagsValue() const {
+ return static_cast<UnderlyingType>(Flags);
+ }
/// Return a reference to the target-specific flags.
TargetFlagsType& getTargetFlags() { return TargetFlags; }
@@ -127,13 +164,28 @@ public:
/// Construct a JITSymbolFlags value based on the flags of the given libobject
/// symbol.
- static JITSymbolFlags fromObjectSymbol(const object::BasicSymbolRef &Symbol);
+ static Expected<JITSymbolFlags>
+ fromObjectSymbol(const object::SymbolRef &Symbol);
private:
- UnderlyingType Flags = None;
+ FlagNames Flags = None;
TargetFlagsType TargetFlags = 0;
};
+inline JITSymbolFlags operator&(const JITSymbolFlags &LHS,
+ const JITSymbolFlags::FlagNames &RHS) {
+ JITSymbolFlags Tmp = LHS;
+ Tmp &= RHS;
+ return Tmp;
+}
+
+inline JITSymbolFlags operator|(const JITSymbolFlags &LHS,
+ const JITSymbolFlags::FlagNames &RHS) {
+ JITSymbolFlags Tmp = LHS;
+ Tmp |= RHS;
+ return Tmp;
+}
+
/// ARM-specific JIT symbol flags.
/// FIXME: This should be moved into a target-specific header.
class ARMJITSymbolFlags {
@@ -147,8 +199,8 @@ public:
operator JITSymbolFlags::TargetFlagsType&() { return Flags; }
- static ARMJITSymbolFlags fromObjectSymbol(
- const object::BasicSymbolRef &Symbol);
+ static ARMJITSymbolFlags fromObjectSymbol(const object::SymbolRef &Symbol);
+
private:
JITSymbolFlags::TargetFlagsType Flags = 0;
};
@@ -293,7 +345,7 @@ class JITSymbolResolver {
public:
using LookupSet = std::set<StringRef>;
using LookupResult = std::map<StringRef, JITEvaluatedSymbol>;
- using LookupFlagsResult = std::map<StringRef, JITSymbolFlags>;
+ using OnResolvedFunction = std::function<void(Expected<LookupResult>)>;
virtual ~JITSymbolResolver() = default;
@@ -302,13 +354,14 @@ public:
///
/// This method will return an error if any of the given symbols can not be
/// resolved, or if the resolution process itself triggers an error.
- virtual Expected<LookupResult> lookup(const LookupSet &Symbols) = 0;
+ virtual void lookup(const LookupSet &Symbols,
+ OnResolvedFunction OnResolved) = 0;
- /// Returns the symbol flags for each of the given symbols.
- ///
- /// This method does NOT return an error if any of the given symbols is
- /// missing. Instead, that symbol will be left out of the result map.
- virtual Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) = 0;
+ /// Returns the subset of the given symbols that should be materialized by
+ /// the caller. Only weak/common symbols should be looked up, as strong
+ /// definitions are implicitly always part of the caller's responsibility.
+ virtual Expected<LookupSet>
+ getResponsibilitySet(const LookupSet &Symbols) = 0;
private:
virtual void anchor();
@@ -320,11 +373,11 @@ public:
/// Performs lookup by, for each symbol, first calling
/// findSymbolInLogicalDylib and if that fails calling
/// findSymbol.
- Expected<LookupResult> lookup(const LookupSet &Symbols) final;
+ void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) final;
/// Performs flags lookup by calling findSymbolInLogicalDylib and
/// returning the flags value for that symbol.
- Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) final;
+ Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) final;
/// This method returns the address of the specified symbol if it exists
/// within the logical dynamic library represented by this JITSymbolResolver.
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index 8bd21a0e3dd6..884878925cde 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -16,6 +16,7 @@
#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@@ -23,6 +24,7 @@
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
#include "llvm/ExecutionEngine/Orc/Legacy.h"
#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
@@ -60,42 +62,73 @@ namespace orc {
class ExtractingIRMaterializationUnit;
-class CompileOnDemandLayer2 : public IRLayer {
- friend class ExtractingIRMaterializationUnit;
+class CompileOnDemandLayer : public IRLayer {
+ friend class PartitioningIRMaterializationUnit;
public:
/// Builder for IndirectStubsManagers.
using IndirectStubsManagerBuilder =
std::function<std::unique_ptr<IndirectStubsManager>()>;
- using GetAvailableContextFunction = std::function<LLVMContext &()>;
+ using GlobalValueSet = std::set<const GlobalValue *>;
- CompileOnDemandLayer2(ExecutionSession &ES, IRLayer &BaseLayer,
- JITCompileCallbackManager &CCMgr,
- IndirectStubsManagerBuilder BuildIndirectStubsManager,
- GetAvailableContextFunction GetAvailableContext);
+ /// Partitioning function.
+ using PartitionFunction =
+ std::function<Optional<GlobalValueSet>(GlobalValueSet Requested)>;
- Error add(VSO &V, VModuleKey K, std::unique_ptr<Module> M) override;
+ /// Off-the-shelf partitioning which compiles all requested symbols (usually
+ /// a single function at a time).
+ static Optional<GlobalValueSet> compileRequested(GlobalValueSet Requested);
- void emit(MaterializationResponsibility R, VModuleKey K,
- std::unique_ptr<Module> M) override;
+ /// Off-the-shelf partitioning which compiles whole modules whenever any
+ /// symbol in them is requested.
+ static Optional<GlobalValueSet> compileWholeModule(GlobalValueSet Requested);
+
+ /// Construct a CompileOnDemandLayer.
+ CompileOnDemandLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+ LazyCallThroughManager &LCTMgr,
+ IndirectStubsManagerBuilder BuildIndirectStubsManager);
+
+ /// Sets the partition function.
+ void setPartitionFunction(PartitionFunction Partition);
+
+ /// Emits the given module. This should not be called by clients: it will be
+ /// called by the JIT when a definition added via the add method is requested.
+ void emit(MaterializationResponsibility R, ThreadSafeModule TSM) override;
private:
- using StubManagersMap =
- std::map<const VSO *, std::unique_ptr<IndirectStubsManager>>;
+ struct PerDylibResources {
+ public:
+ PerDylibResources(JITDylib &ImplD,
+ std::unique_ptr<IndirectStubsManager> ISMgr)
+ : ImplD(ImplD), ISMgr(std::move(ISMgr)) {}
+ JITDylib &getImplDylib() { return ImplD; }
+ IndirectStubsManager &getISManager() { return *ISMgr; }
+
+ private:
+ JITDylib &ImplD;
+ std::unique_ptr<IndirectStubsManager> ISMgr;
+ };
+
+ using PerDylibResourcesMap = std::map<const JITDylib *, PerDylibResources>;
+
+ PerDylibResources &getPerDylibResources(JITDylib &TargetD);
- IndirectStubsManager &getStubsManager(const VSO &V);
+ void cleanUpModule(Module &M);
- void emitExtractedFunctionsModule(MaterializationResponsibility R,
- std::unique_ptr<Module> M);
+ void expandPartition(GlobalValueSet &Partition);
+
+ void emitPartition(MaterializationResponsibility R, ThreadSafeModule TSM,
+ IRMaterializationUnit::SymbolNameToDefinitionMap Defs);
mutable std::mutex CODLayerMutex;
IRLayer &BaseLayer;
- JITCompileCallbackManager &CCMgr;
+ LazyCallThroughManager &LCTMgr;
IndirectStubsManagerBuilder BuildIndirectStubsManager;
- StubManagersMap StubsMgrs;
- GetAvailableContextFunction GetAvailableContext;
+ PerDylibResourcesMap DylibResources;
+ PartitionFunction Partition = compileRequested;
+ SymbolLinkagePromoter PromoteSymbols;
};
/// Compile-on-demand layer.
@@ -108,7 +141,7 @@ private:
template <typename BaseLayerT,
typename CompileCallbackMgrT = JITCompileCallbackManager,
typename IndirectStubsMgrT = IndirectStubsManager>
-class CompileOnDemandLayer {
+class LegacyCompileOnDemandLayer {
private:
template <typename MaterializerFtor>
class LambdaMaterializer final : public ValueMaterializer {
@@ -158,25 +191,6 @@ private:
return llvm::make_unique<RO>(std::move(ResourcePtr));
}
- class StaticGlobalRenamer {
- public:
- StaticGlobalRenamer() = default;
- StaticGlobalRenamer(StaticGlobalRenamer &&) = default;
- StaticGlobalRenamer &operator=(StaticGlobalRenamer &&) = default;
-
- void rename(Module &M) {
- for (auto &F : M)
- if (F.hasLocalLinkage())
- F.setName("$static." + Twine(NextId++));
- for (auto &G : M.globals())
- if (G.hasLocalLinkage())
- G.setName("$static." + Twine(NextId++));
- }
-
- private:
- unsigned NextId = 0;
- };
-
struct LogicalDylib {
struct SourceModuleEntry {
std::unique_ptr<Module> SourceMod;
@@ -230,7 +244,7 @@ private:
VModuleKey K;
std::shared_ptr<SymbolResolver> BackingResolver;
std::unique_ptr<IndirectStubsMgrT> StubsMgr;
- StaticGlobalRenamer StaticRenamer;
+ SymbolLinkagePromoter PromoteSymbols;
SourceModulesList SourceModules;
std::vector<VModuleKey> BaseLayerVModuleKeys;
};
@@ -251,13 +265,13 @@ public:
std::function<void(VModuleKey K, std::shared_ptr<SymbolResolver> R)>;
/// Construct a compile-on-demand layer instance.
- CompileOnDemandLayer(ExecutionSession &ES, BaseLayerT &BaseLayer,
- SymbolResolverGetter GetSymbolResolver,
- SymbolResolverSetter SetSymbolResolver,
- PartitioningFtor Partition,
- CompileCallbackMgrT &CallbackMgr,
- IndirectStubsManagerBuilderT CreateIndirectStubsManager,
- bool CloneStubsIntoPartitions = true)
+ LegacyCompileOnDemandLayer(ExecutionSession &ES, BaseLayerT &BaseLayer,
+ SymbolResolverGetter GetSymbolResolver,
+ SymbolResolverSetter SetSymbolResolver,
+ PartitioningFtor Partition,
+ CompileCallbackMgrT &CallbackMgr,
+ IndirectStubsManagerBuilderT CreateIndirectStubsManager,
+ bool CloneStubsIntoPartitions = true)
: ES(ES), BaseLayer(BaseLayer),
GetSymbolResolver(std::move(GetSymbolResolver)),
SetSymbolResolver(std::move(SetSymbolResolver)),
@@ -265,7 +279,7 @@ public:
CreateIndirectStubsManager(std::move(CreateIndirectStubsManager)),
CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
- ~CompileOnDemandLayer() {
+ ~LegacyCompileOnDemandLayer() {
// FIXME: Report error on log.
while (!LogicalDylibs.empty())
consumeError(removeModule(LogicalDylibs.begin()->first));
@@ -352,14 +366,9 @@ public:
private:
Error addLogicalModule(LogicalDylib &LD, std::unique_ptr<Module> SrcMPtr) {
- // Rename all static functions / globals to $static.X :
- // This will unique the names across all modules in the logical dylib,
- // simplifying symbol lookup.
- LD.StaticRenamer.rename(*SrcMPtr);
-
- // Bump the linkage and rename any anonymous/private members in SrcM to
- // ensure that everything will resolve properly after we partition SrcM.
- makeAllSymbolsExternallyAccessible(*SrcMPtr);
+ // Rename anonymous globals and promote linkage to ensure that everything
+ // will resolve properly after we partition SrcM.
+ LD.PromoteSymbols(*SrcMPtr);
// Create a logical module handle for SrcM within the logical dylib.
Module &SrcM = *SrcMPtr;
@@ -500,28 +509,29 @@ private:
auto GVsResolver = createSymbolResolver(
[&LD, LegacyLookup](const SymbolNameSet &Symbols) {
- auto SymbolFlags = lookupFlagsWithLegacyFn(Symbols, LegacyLookup);
+ auto RS = getResponsibilitySetWithLegacyFn(Symbols, LegacyLookup);
- if (!SymbolFlags) {
- logAllUnhandledErrors(SymbolFlags.takeError(), errs(),
- "CODLayer/GVsResolver flags lookup failed: ");
- return SymbolFlagsMap();
+ if (!RS) {
+ logAllUnhandledErrors(
+ RS.takeError(), errs(),
+ "CODLayer/GVsResolver responsibility set lookup failed: ");
+ return SymbolNameSet();
}
- if (SymbolFlags->size() == Symbols.size())
- return *SymbolFlags;
+ if (RS->size() == Symbols.size())
+ return *RS;
SymbolNameSet NotFoundViaLegacyLookup;
for (auto &S : Symbols)
- if (!SymbolFlags->count(S))
+ if (!RS->count(S))
NotFoundViaLegacyLookup.insert(S);
- auto SymbolFlags2 =
- LD.BackingResolver->lookupFlags(NotFoundViaLegacyLookup);
+ auto RS2 =
+ LD.BackingResolver->getResponsibilitySet(NotFoundViaLegacyLookup);
- for (auto &KV : SymbolFlags2)
- (*SymbolFlags)[KV.first] = std::move(KV.second);
+ for (auto &S : RS2)
+ (*RS).insert(S);
- return *SymbolFlags;
+ return *RS;
},
[this, &LD,
LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Query,
@@ -669,28 +679,29 @@ private:
// Create memory manager and symbol resolver.
auto Resolver = createSymbolResolver(
[&LD, LegacyLookup](const SymbolNameSet &Symbols) {
- auto SymbolFlags = lookupFlagsWithLegacyFn(Symbols, LegacyLookup);
- if (!SymbolFlags) {
- logAllUnhandledErrors(SymbolFlags.takeError(), errs(),
- "CODLayer/SubResolver flags lookup failed: ");
- return SymbolFlagsMap();
+ auto RS = getResponsibilitySetWithLegacyFn(Symbols, LegacyLookup);
+ if (!RS) {
+ logAllUnhandledErrors(
+ RS.takeError(), errs(),
+ "CODLayer/SubResolver responsibility set lookup failed: ");
+ return SymbolNameSet();
}
- if (SymbolFlags->size() == Symbols.size())
- return *SymbolFlags;
+ if (RS->size() == Symbols.size())
+ return *RS;
SymbolNameSet NotFoundViaLegacyLookup;
for (auto &S : Symbols)
- if (!SymbolFlags->count(S))
+ if (!RS->count(S))
NotFoundViaLegacyLookup.insert(S);
- auto SymbolFlags2 =
- LD.BackingResolver->lookupFlags(NotFoundViaLegacyLookup);
+ auto RS2 =
+ LD.BackingResolver->getResponsibilitySet(NotFoundViaLegacyLookup);
- for (auto &KV : SymbolFlags2)
- (*SymbolFlags)[KV.first] = std::move(KV.second);
+ for (auto &S : RS2)
+ (*RS).insert(S);
- return *SymbolFlags;
+ return *RS;
},
[this, &LD, LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Q,
SymbolNameSet Symbols) {
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
index 213a59124c85..f34f88311ba5 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -16,7 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
-#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
@@ -38,7 +38,7 @@ namespace orc {
/// Simple compile functor: Takes a single IR module and returns an ObjectFile.
/// This compiler supports a single compilation thread and LLVMContext only.
-/// For multithreaded compilation, use MultiThreadedSimpleCompiler below.
+/// For multithreaded compilation, use ConcurrentIRCompiler below.
class SimpleCompiler {
public:
using CompileResult = std::unique_ptr<MemoryBuffer>;
@@ -105,10 +105,10 @@ private:
///
/// This class creates a new TargetMachine and SimpleCompiler instance for each
/// compile.
-class MultiThreadedSimpleCompiler {
+class ConcurrentIRCompiler {
public:
- MultiThreadedSimpleCompiler(JITTargetMachineBuilder JTMB,
- ObjectCache *ObjCache = nullptr)
+ ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
+ ObjectCache *ObjCache = nullptr)
: JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
void setObjectCache(ObjectCache *ObjCache) { this->ObjCache = ObjCache; }
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Core.h
index 11d7c091947e..39d306e0bd4c 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Core.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Core.h
@@ -18,13 +18,13 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
-#include <list>
-#include <map>
#include <memory>
-#include <set>
#include <vector>
+#define DEBUG_TYPE "orc"
+
namespace llvm {
namespace orc {
@@ -33,7 +33,7 @@ class AsynchronousSymbolQuery;
class ExecutionSession;
class MaterializationUnit;
class MaterializationResponsibility;
-class VSO;
+class JITDylib;
/// VModuleKey provides a unique identifier (allocated and managed by
/// ExecutionSessions) for a module added to the JIT.
@@ -41,36 +41,52 @@ using VModuleKey = uint64_t;
/// A set of symbol names (represented by SymbolStringPtrs for
// efficiency).
-using SymbolNameSet = std::set<SymbolStringPtr>;
-
-/// Render a SymbolNameSet to an ostream.
-raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols);
+using SymbolNameSet = DenseSet<SymbolStringPtr>;
/// A map from symbol names (as SymbolStringPtrs) to JITSymbols
/// (address/flags pairs).
-using SymbolMap = std::map<SymbolStringPtr, JITEvaluatedSymbol>;
-
-/// Render a SymbolMap to an ostream.
-raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols);
+using SymbolMap = DenseMap<SymbolStringPtr, JITEvaluatedSymbol>;
/// A map from symbol names (as SymbolStringPtrs) to JITSymbolFlags.
-using SymbolFlagsMap = std::map<SymbolStringPtr, JITSymbolFlags>;
-
-/// Render a SymbolMap to an ostream.
-raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &Symbols);
+using SymbolFlagsMap = DenseMap<SymbolStringPtr, JITSymbolFlags>;
/// A base class for materialization failures that allows the failing
/// symbols to be obtained for logging.
-using SymbolDependenceMap = std::map<VSO *, SymbolNameSet>;
+using SymbolDependenceMap = DenseMap<JITDylib *, SymbolNameSet>;
+
+/// A list of (JITDylib*, bool) pairs.
+using JITDylibSearchList = std::vector<std::pair<JITDylib *, bool>>;
+
+/// Render a SymbolStringPtr.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym);
+
+/// Render a SymbolNameSet.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols);
+
+/// Render a SymbolFlagsMap entry.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV);
+
+/// Render a SymbolMap entry.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV);
+
+/// Render a SymbolFlagsMap.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags);
+
+/// Render a SymbolMap.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols);
+
+/// Render a SymbolDependenceMap entry.
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolDependenceMap::value_type &KV);
/// Render a SymbolDependenceMap.
raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps);
-/// A list of VSO pointers.
-using VSOList = std::vector<VSO *>;
+/// Render a MaterializationUnit.
+raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU);
-/// Render a VSOList.
-raw_ostream &operator<<(raw_ostream &OS, const VSOList &VSOs);
+/// Render a JITDylibSearchList.
+raw_ostream &operator<<(raw_ostream &OS, const JITDylibSearchList &JDs);
/// Callback to notify client that symbols have been resolved.
using SymbolsResolvedCallback = std::function<void(Expected<SymbolMap>)>;
@@ -86,7 +102,8 @@ using RegisterDependenciesFunction =
/// are no dependants to register with.
extern RegisterDependenciesFunction NoDependenciesToRegister;
-/// Used to notify a VSO that the given set of symbols failed to materialize.
+/// Used to notify a JITDylib that the given set of symbols failed to
+/// materialize.
class FailedToMaterialize : public ErrorInfo<FailedToMaterialize> {
public:
static char ID;
@@ -114,12 +131,26 @@ private:
SymbolNameSet Symbols;
};
+/// Used to notify clients that a set of symbols could not be removed.
+class SymbolsCouldNotBeRemoved : public ErrorInfo<SymbolsCouldNotBeRemoved> {
+public:
+ static char ID;
+
+ SymbolsCouldNotBeRemoved(SymbolNameSet Symbols);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const SymbolNameSet &getSymbols() const { return Symbols; }
+
+private:
+ SymbolNameSet Symbols;
+};
+
/// Tracks responsibility for materialization, and mediates interactions between
-/// MaterializationUnits and VSOs.
+/// MaterializationUnits and JDs.
///
/// An instance of this class is passed to MaterializationUnits when their
/// materialize method is called. It allows MaterializationUnits to resolve and
-/// finalize symbols, or abandon materialization by notifying any unmaterialized
+/// emit symbols, or abandon materialization by notifying any unmaterialized
/// symbols of an error.
class MaterializationResponsibility {
friend class MaterializationUnit;
@@ -130,41 +161,54 @@ public:
/// Destruct a MaterializationResponsibility instance. In debug mode
/// this asserts that all symbols being tracked have been either
- /// finalized or notified of an error.
+ /// emitted or notified of an error.
~MaterializationResponsibility();
- /// Returns the target VSO that these symbols are being materialized
+ /// Returns the target JITDylib that these symbols are being materialized
/// into.
- VSO &getTargetVSO() const { return V; }
+ JITDylib &getTargetJITDylib() const { return JD; }
+
+ /// Returns the VModuleKey for this instance.
+ VModuleKey getVModuleKey() const { return K; }
/// Returns the symbol flags map for this responsibility instance.
- SymbolFlagsMap getSymbols() { return SymbolFlags; }
+ /// Note: The returned flags may have transient flags (Lazy, Materializing)
+ /// set. These should be stripped with JITSymbolFlags::stripTransientFlags
+ /// before using.
+ const SymbolFlagsMap &getSymbols() { return SymbolFlags; }
/// Returns the names of any symbols covered by this
/// MaterializationResponsibility object that have queries pending. This
/// information can be used to return responsibility for unrequested symbols
- /// back to the VSO via the delegate method.
- SymbolNameSet getRequestedSymbols();
-
- /// Resolves the given symbols. Individual calls to this method may
- /// resolve a subset of the symbols, but all symbols must have been
- /// resolved prior to calling finalize.
+ /// back to the JITDylib via the delegate method.
+ SymbolNameSet getRequestedSymbols() const;
+
+ /// Notifies the target JITDylib that the given symbols have been resolved.
+ /// This will update the given symbols' addresses in the JITDylib, and notify
+ /// any pending queries on the given symbols of their resolution. The given
+ /// symbols must be ones covered by this MaterializationResponsibility
+ /// instance. Individual calls to this method may resolve a subset of the
+ /// symbols, but all symbols must have been resolved prior to calling emit.
void resolve(const SymbolMap &Symbols);
- /// Finalizes all symbols tracked by this instance.
- void finalize();
+ /// Notifies the target JITDylib (and any pending queries on that JITDylib)
+ /// that all symbols covered by this MaterializationResponsibility instance
+ /// have been emitted.
+ void emit();
- /// Adds new symbols to the VSO and this responsibility instance.
- /// VSO entries start out in the materializing state.
+ /// Adds new symbols to the JITDylib and this responsibility instance.
+ /// JITDylib entries start out in the materializing state.
///
/// This method can be used by materialization units that want to add
/// additional symbols at materialization time (e.g. stubs, compile
/// callbacks, metadata).
Error defineMaterializing(const SymbolFlagsMap &SymbolFlags);
- /// Notify all unfinalized symbols that an error has occurred.
+ /// Notify all not-yet-emitted symbols covered by this MaterializationResponsibility
+ /// instance that an error has occurred.
/// This will remove all symbols covered by this MaterializationResponsibility
- /// from V, and send an error to any queries waiting on these symbols.
+ /// from the target JITDylib, and send an error to any queries waiting on
+ /// these symbols.
void failMaterialization();
/// Transfers responsibility to the given MaterializationUnit for all
@@ -177,7 +221,8 @@ public:
/// Delegates responsibility for the given symbols to the returned
/// materialization responsibility. Useful for breaking up work between
/// threads, or different kinds of materialization processes.
- MaterializationResponsibility delegate(const SymbolNameSet &Symbols);
+ MaterializationResponsibility delegate(const SymbolNameSet &Symbols,
+ VModuleKey NewKey = VModuleKey());
void addDependencies(const SymbolStringPtr &Name,
const SymbolDependenceMap &Dependencies);
@@ -186,12 +231,14 @@ public:
void addDependenciesForAll(const SymbolDependenceMap &Dependencies);
private:
- /// Create a MaterializationResponsibility for the given VSO and
+ /// Create a MaterializationResponsibility for the given JITDylib and
/// initial symbols.
- MaterializationResponsibility(VSO &V, SymbolFlagsMap SymbolFlags);
+ MaterializationResponsibility(JITDylib &JD, SymbolFlagsMap SymbolFlags,
+ VModuleKey K);
- VSO &V;
+ JITDylib &JD;
SymbolFlagsMap SymbolFlags;
+ VModuleKey K;
};
/// A MaterializationUnit represents a set of symbol definitions that can
@@ -199,35 +246,41 @@ private:
/// overriding definitions are encountered).
///
/// MaterializationUnits are used when providing lazy definitions of symbols to
-/// VSOs. The VSO will call materialize when the address of a symbol is
-/// requested via the lookup method. The VSO will call discard if a stronger
-/// definition is added or already present.
+/// JITDylibs. The JITDylib will call materialize when the address of a symbol
+/// is requested via the lookup method. The JITDylib will call discard if a
+/// stronger definition is added or already present.
class MaterializationUnit {
public:
- MaterializationUnit(SymbolFlagsMap InitalSymbolFlags)
- : SymbolFlags(std::move(InitalSymbolFlags)) {}
+ MaterializationUnit(SymbolFlagsMap InitalSymbolFlags, VModuleKey K)
+ : SymbolFlags(std::move(InitalSymbolFlags)), K(std::move(K)) {}
virtual ~MaterializationUnit() {}
+ /// Return the name of this materialization unit. Useful for debugging
+ /// output.
+ virtual StringRef getName() const = 0;
+
/// Return the set of symbols that this source provides.
const SymbolFlagsMap &getSymbols() const { return SymbolFlags; }
/// Called by materialization dispatchers (see
/// ExecutionSession::DispatchMaterializationFunction) to trigger
/// materialization of this MaterializationUnit.
- void doMaterialize(VSO &V) {
- materialize(MaterializationResponsibility(V, std::move(SymbolFlags)));
+ void doMaterialize(JITDylib &JD) {
+ materialize(MaterializationResponsibility(JD, std::move(SymbolFlags),
+ std::move(K)));
}
- /// Called by VSOs to notify MaterializationUnits that the given symbol has
- /// been overridden.
- void doDiscard(const VSO &V, SymbolStringPtr Name) {
+ /// Called by JITDylibs to notify MaterializationUnits that the given symbol
+ /// has been overridden.
+ void doDiscard(const JITDylib &JD, const SymbolStringPtr &Name) {
SymbolFlags.erase(Name);
- discard(V, std::move(Name));
+ discard(JD, std::move(Name));
}
protected:
SymbolFlagsMap SymbolFlags;
+ VModuleKey K;
private:
virtual void anchor();
@@ -241,7 +294,7 @@ private:
/// from the source (e.g. if the source is an LLVM IR Module and the
/// symbol is a function, delete the function body or mark it available
/// externally).
- virtual void discard(const VSO &V, SymbolStringPtr Name) = 0;
+ virtual void discard(const JITDylib &JD, const SymbolStringPtr &Name) = 0;
};
using MaterializationUnitList =
@@ -253,30 +306,32 @@ using MaterializationUnitList =
/// materialized.
class AbsoluteSymbolsMaterializationUnit : public MaterializationUnit {
public:
- AbsoluteSymbolsMaterializationUnit(SymbolMap Symbols);
+ AbsoluteSymbolsMaterializationUnit(SymbolMap Symbols, VModuleKey K);
+
+ StringRef getName() const override;
private:
void materialize(MaterializationResponsibility R) override;
- void discard(const VSO &V, SymbolStringPtr Name) override;
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
static SymbolFlagsMap extractFlags(const SymbolMap &Symbols);
SymbolMap Symbols;
};
/// Create an AbsoluteSymbolsMaterializationUnit with the given symbols.
-/// Useful for inserting absolute symbols into a VSO. E.g.:
+/// Useful for inserting absolute symbols into a JITDylib. E.g.:
/// \code{.cpp}
-/// VSO &V = ...;
+/// JITDylib &JD = ...;
/// SymbolStringPtr Foo = ...;
/// JITEvaluatedSymbol FooSym = ...;
-/// if (auto Err = V.define(absoluteSymbols({{Foo, FooSym}})))
+/// if (auto Err = JD.define(absoluteSymbols({{Foo, FooSym}})))
/// return Err;
/// \endcode
///
inline std::unique_ptr<AbsoluteSymbolsMaterializationUnit>
-absoluteSymbols(SymbolMap Symbols) {
+absoluteSymbols(SymbolMap Symbols, VModuleKey K = VModuleKey()) {
return llvm::make_unique<AbsoluteSymbolsMaterializationUnit>(
- std::move(Symbols));
+ std::move(Symbols), std::move(K));
}
struct SymbolAliasMapEntry {
@@ -289,191 +344,87 @@ struct SymbolAliasMapEntry {
};
/// A map of Symbols to (Symbol, Flags) pairs.
-using SymbolAliasMap = std::map<SymbolStringPtr, SymbolAliasMapEntry>;
+using SymbolAliasMap = DenseMap<SymbolStringPtr, SymbolAliasMapEntry>;
/// A materialization unit for symbol aliases. Allows existing symbols to be
/// aliased with alternate flags.
class ReExportsMaterializationUnit : public MaterializationUnit {
public:
- /// SourceVSO is allowed to be nullptr, in which case the source VSO is
- /// taken to be whatever VSO these definitions are materialized in. This
- /// is useful for defining aliases within a VSO.
+ /// SourceJD is allowed to be nullptr, in which case the source JITDylib is
+ /// taken to be whatever JITDylib these definitions are materialized in (and
+ /// MatchNonExported has no effect). This is useful for defining aliases
+ /// within a JITDylib.
///
/// Note: Care must be taken that no sets of aliases form a cycle, as such
/// a cycle will result in a deadlock when any symbol in the cycle is
/// resolved.
- ReExportsMaterializationUnit(VSO *SourceVSO, SymbolAliasMap Aliases);
+ ReExportsMaterializationUnit(JITDylib *SourceJD, bool MatchNonExported,
+ SymbolAliasMap Aliases, VModuleKey K);
+
+ StringRef getName() const override;
private:
void materialize(MaterializationResponsibility R) override;
- void discard(const VSO &V, SymbolStringPtr Name) override;
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
static SymbolFlagsMap extractFlags(const SymbolAliasMap &Aliases);
- VSO *SourceVSO = nullptr;
+ JITDylib *SourceJD = nullptr;
+ bool MatchNonExported = false;
SymbolAliasMap Aliases;
};
/// Create a ReExportsMaterializationUnit with the given aliases.
-/// Useful for defining symbol aliases.: E.g., given a VSO V containing symbols
-/// "foo" and "bar", we can define aliases "baz" (for "foo") and "qux" (for
-/// "bar") with:
-/// \code{.cpp}
+/// Useful for defining symbol aliases.: E.g., given a JITDylib JD containing
+/// symbols "foo" and "bar", we can define aliases "baz" (for "foo") and "qux"
+/// (for "bar") with: \code{.cpp}
/// SymbolStringPtr Baz = ...;
/// SymbolStringPtr Qux = ...;
-/// if (auto Err = V.define(symbolAliases({
+/// if (auto Err = JD.define(symbolAliases({
/// {Baz, { Foo, JITSymbolFlags::Exported }},
/// {Qux, { Bar, JITSymbolFlags::Weak }}}))
/// return Err;
/// \endcode
inline std::unique_ptr<ReExportsMaterializationUnit>
-symbolAliases(SymbolAliasMap Aliases) {
- return llvm::make_unique<ReExportsMaterializationUnit>(nullptr,
- std::move(Aliases));
+symbolAliases(SymbolAliasMap Aliases, VModuleKey K = VModuleKey()) {
+ return llvm::make_unique<ReExportsMaterializationUnit>(
+ nullptr, true, std::move(Aliases), std::move(K));
}
-/// Create a materialization unit for re-exporting symbols from another VSO
+/// Create a materialization unit for re-exporting symbols from another JITDylib
/// with alternative names/flags.
+/// If MatchNonExported is true then non-exported symbols from SourceJD can be
+/// re-exported. If it is false, attempts to re-export a non-exported symbol
+/// will result in a "symbol not found" error.
inline std::unique_ptr<ReExportsMaterializationUnit>
-reexports(VSO &SourceV, SymbolAliasMap Aliases) {
- return llvm::make_unique<ReExportsMaterializationUnit>(&SourceV,
- std::move(Aliases));
+reexports(JITDylib &SourceJD, SymbolAliasMap Aliases,
+ bool MatchNonExported = false, VModuleKey K = VModuleKey()) {
+ return llvm::make_unique<ReExportsMaterializationUnit>(
+ &SourceJD, MatchNonExported, std::move(Aliases), std::move(K));
}
/// Build a SymbolAliasMap for the common case where you want to re-export
-/// symbols from another VSO with the same linkage/flags.
+/// symbols from another JITDylib with the same linkage/flags.
Expected<SymbolAliasMap>
-buildSimpleReexportsAliasMap(VSO &SourceV, const SymbolNameSet &Symbols);
-
-/// Base utilities for ExecutionSession.
-class ExecutionSessionBase {
- // FIXME: Remove this when we remove the old ORC layers.
- friend class VSO;
+buildSimpleReexportsAliasMap(JITDylib &SourceJD, const SymbolNameSet &Symbols);
+/// ReexportsGenerator can be used with JITDylib::setGenerator to automatically
+/// re-export a subset of the source JITDylib's symbols in the target.
+class ReexportsGenerator {
public:
- /// For reporting errors.
- using ErrorReporter = std::function<void(Error)>;
-
- /// For dispatching MaterializationUnit::materialize calls.
- using DispatchMaterializationFunction =
- std::function<void(VSO &V, std::unique_ptr<MaterializationUnit> MU)>;
-
- /// Construct an ExecutionSessionBase.
- ///
- /// SymbolStringPools may be shared between ExecutionSessions.
- ExecutionSessionBase(std::shared_ptr<SymbolStringPool> SSP = nullptr)
- : SSP(SSP ? std::move(SSP) : std::make_shared<SymbolStringPool>()) {}
-
- /// Returns the SymbolStringPool for this ExecutionSession.
- SymbolStringPool &getSymbolStringPool() const { return *SSP; }
-
- /// Run the given lambda with the session mutex locked.
- template <typename Func> auto runSessionLocked(Func &&F) -> decltype(F()) {
- std::lock_guard<std::recursive_mutex> Lock(SessionMutex);
- return F();
- }
-
- /// Set the error reporter function.
- ExecutionSessionBase &setErrorReporter(ErrorReporter ReportError) {
- this->ReportError = std::move(ReportError);
- return *this;
- }
-
- /// Set the materialization dispatch function.
- ExecutionSessionBase &setDispatchMaterialization(
- DispatchMaterializationFunction DispatchMaterialization) {
- this->DispatchMaterialization = std::move(DispatchMaterialization);
- return *this;
- }
-
- /// Report a error for this execution session.
- ///
- /// Unhandled errors can be sent here to log them.
- void reportError(Error Err) { ReportError(std::move(Err)); }
+ using SymbolPredicate = std::function<bool(SymbolStringPtr)>;
- /// Allocate a module key for a new module to add to the JIT.
- VModuleKey allocateVModule() { return ++LastKey; }
+ /// Create a reexports generator. If an Allow predicate is passed, only
+ /// symbols for which the predicate returns true will be reexported. If no
+ /// Allow predicate is passed, all symbols will be exported.
+ ReexportsGenerator(JITDylib &SourceJD, bool MatchNonExported = false,
+ SymbolPredicate Allow = SymbolPredicate());
- /// Return a module key to the ExecutionSession so that it can be
- /// re-used. This should only be done once all resources associated
- /// with the original key have been released.
- void releaseVModule(VModuleKey Key) { /* FIXME: Recycle keys */
- }
-
- void legacyFailQuery(AsynchronousSymbolQuery &Q, Error Err);
-
- using LegacyAsyncLookupFunction = std::function<SymbolNameSet(
- std::shared_ptr<AsynchronousSymbolQuery> Q, SymbolNameSet Names)>;
-
- /// A legacy lookup function for JITSymbolResolverAdapter.
- /// Do not use -- this will be removed soon.
- Expected<SymbolMap>
- legacyLookup(ExecutionSessionBase &ES, LegacyAsyncLookupFunction AsyncLookup,
- SymbolNameSet Names, bool WaiUntilReady,
- RegisterDependenciesFunction RegisterDependencies);
-
- /// Search the given VSO list for the given symbols.
- ///
- ///
- /// The OnResolve callback will be called once all requested symbols are
- /// resolved, or if an error occurs prior to resolution.
- ///
- /// The OnReady callback will be called once all requested symbols are ready,
- /// or if an error occurs after resolution but before all symbols are ready.
- ///
- /// If all symbols are found, the RegisterDependencies function will be called
- /// while the session lock is held. This gives clients a chance to register
- /// dependencies for on the queried symbols for any symbols they are
- /// materializing (if a MaterializationResponsibility instance is present,
- /// this can be implemented by calling
- /// MaterializationResponsibility::addDependencies). If there are no
- /// dependenant symbols for this query (e.g. it is being made by a top level
- /// client to get an address to call) then the value NoDependenciesToRegister
- /// can be used.
- void lookup(const VSOList &VSOs, const SymbolNameSet &Symbols,
- SymbolsResolvedCallback OnResolve, SymbolsReadyCallback OnReady,
- RegisterDependenciesFunction RegisterDependencies);
-
- /// Blocking version of lookup above. Returns the resolved symbol map.
- /// If WaitUntilReady is true (the default), will not return until all
- /// requested symbols are ready (or an error occurs). If WaitUntilReady is
- /// false, will return as soon as all requested symbols are resolved,
- /// or an error occurs. If WaitUntilReady is false and an error occurs
- /// after resolution, the function will return a success value, but the
- /// error will be reported via reportErrors.
- Expected<SymbolMap> lookup(const VSOList &VSOs, const SymbolNameSet &Symbols,
- RegisterDependenciesFunction RegisterDependencies,
- bool WaitUntilReady = true);
-
- /// Materialize the given unit.
- void dispatchMaterialization(VSO &V,
- std::unique_ptr<MaterializationUnit> MU) {
- DispatchMaterialization(V, std::move(MU));
- }
+ SymbolNameSet operator()(JITDylib &JD, const SymbolNameSet &Names);
private:
- static void logErrorsToStdErr(Error Err) {
- logAllUnhandledErrors(std::move(Err), errs(), "JIT session error: ");
- }
-
- static void
- materializeOnCurrentThread(VSO &V, std::unique_ptr<MaterializationUnit> MU) {
- MU->doMaterialize(V);
- }
-
- void runOutstandingMUs();
-
- mutable std::recursive_mutex SessionMutex;
- std::shared_ptr<SymbolStringPool> SSP;
- VModuleKey LastKey = 0;
- ErrorReporter ReportError = logErrorsToStdErr;
- DispatchMaterializationFunction DispatchMaterialization =
- materializeOnCurrentThread;
-
- // FIXME: Remove this (and runOutstandingMUs) once the linking layer works
- // with callbacks from asynchronous queries.
- mutable std::recursive_mutex OutstandingMUsMutex;
- std::vector<std::pair<VSO *, std::unique_ptr<MaterializationUnit>>>
- OutstandingMUs;
+ JITDylib &SourceJD;
+ bool MatchNonExported = false;
+ SymbolPredicate Allow;
};
/// A symbol query that returns results via a callback when results are
@@ -481,8 +432,9 @@ private:
///
/// makes a callback when all symbols are available.
class AsynchronousSymbolQuery {
- friend class ExecutionSessionBase;
- friend class VSO;
+ friend class ExecutionSession;
+ friend class JITDylib;
+ friend class JITSymbolResolverAdapter;
public:
@@ -517,9 +469,9 @@ public:
void handleFullyReady();
private:
- void addQueryDependence(VSO &V, SymbolStringPtr Name);
+ void addQueryDependence(JITDylib &JD, SymbolStringPtr Name);
- void removeQueryDependence(VSO &V, const SymbolStringPtr &Name);
+ void removeQueryDependence(JITDylib &JD, const SymbolStringPtr &Name);
bool canStillFail();
@@ -539,110 +491,118 @@ private:
///
/// Represents a virtual shared object. Instances can not be copied or moved, so
/// their addresses may be used as keys for resource management.
-/// VSO state changes must be made via an ExecutionSession to guarantee that
-/// they are synchronized with respect to other VSO operations.
-class VSO {
+/// JITDylib state changes must be made via an ExecutionSession to guarantee
+/// that they are synchronized with respect to other JITDylib operations.
+class JITDylib {
friend class AsynchronousSymbolQuery;
friend class ExecutionSession;
- friend class ExecutionSessionBase;
friend class MaterializationResponsibility;
public:
- using FallbackDefinitionGeneratorFunction =
- std::function<SymbolNameSet(VSO &Parent, const SymbolNameSet &Names)>;
+ using GeneratorFunction = std::function<SymbolNameSet(
+ JITDylib &Parent, const SymbolNameSet &Names)>;
using AsynchronousSymbolQuerySet =
- std::set<std::shared_ptr<AsynchronousSymbolQuery>>;
+ std::set<std::shared_ptr<AsynchronousSymbolQuery>>;
- VSO(const VSO &) = delete;
- VSO &operator=(const VSO &) = delete;
- VSO(VSO &&) = delete;
- VSO &operator=(VSO &&) = delete;
+ JITDylib(const JITDylib &) = delete;
+ JITDylib &operator=(const JITDylib &) = delete;
+ JITDylib(JITDylib &&) = delete;
+ JITDylib &operator=(JITDylib &&) = delete;
- /// Get the name for this VSO.
- const std::string &getName() const { return VSOName; }
+ /// Get the name for this JITDylib.
+ const std::string &getName() const { return JITDylibName; }
- /// Get a reference to the ExecutionSession for this VSO.
- ExecutionSessionBase &getExecutionSession() const { return ES; }
+ /// Get a reference to the ExecutionSession for this JITDylib.
+ ExecutionSession &getExecutionSession() const { return ES; }
- /// Set a fallback defenition generator. If set, lookup and lookupFlags will
- /// pass the unresolved symbols set to the fallback definition generator,
- /// allowing it to add a new definition to the VSO.
- void setFallbackDefinitionGenerator(
- FallbackDefinitionGeneratorFunction FallbackDefinitionGenerator) {
- this->FallbackDefinitionGenerator = std::move(FallbackDefinitionGenerator);
+ /// Set a definition generator. If set, whenever a symbol fails to resolve
+ /// within this JITDylib, lookup and lookupFlags will pass the unresolved
+ /// symbols set to the definition generator. The generator can optionally
+ /// add a definition for the unresolved symbols to the dylib.
+ void setGenerator(GeneratorFunction DefGenerator) {
+ this->DefGenerator = std::move(DefGenerator);
}
- /// Set the search order to be used when fixing up definitions in VSO.
+ /// Set the search order to be used when fixing up definitions in JITDylib.
/// This will replace the previous search order, and apply to any symbol
- /// resolutions made for definitions in this VSO after the call to
+ /// resolutions made for definitions in this JITDylib after the call to
/// setSearchOrder (even if the definition itself was added before the
/// call).
///
- /// If SearchThisVSOFirst is set, which by default it is, then this VSO will
- /// add itself to the beginning of the SearchOrder (Clients should *not*
- /// put this VSO in the list in this case, to avoid redundant lookups).
+ /// If SearchThisJITDylibFirst is set, which by default it is, then this
+ /// JITDylib will add itself to the beginning of the SearchOrder (Clients
+ /// should *not* put this JITDylib in the list in this case, to avoid
+ /// redundant lookups).
///
- /// If SearchThisVSOFirst is false then the search order will be used as
+ /// If SearchThisJITDylibFirst is false then the search order will be used as
/// given. The main motivation for this feature is to support deliberate
- /// shadowing of symbols in this VSO by a facade VSO. For example, the
- /// facade may resolve function names to stubs, and the stubs may compile
+ /// shadowing of symbols in this JITDylib by a facade JITDylib. For example,
+ /// the facade may resolve function names to stubs, and the stubs may compile
/// lazily by looking up symbols in this dylib. Adding the facade dylib
/// as the first in the search order (instead of this dylib) ensures that
/// definitions within this dylib resolve to the lazy-compiling stubs,
/// rather than immediately materializing the definitions in this dylib.
- void setSearchOrder(VSOList NewSearchOrder, bool SearchThisVSOFirst = true);
+ void setSearchOrder(JITDylibSearchList NewSearchOrder,
+ bool SearchThisJITDylibFirst = true,
+ bool MatchNonExportedInThisDylib = true);
- /// Add the given VSO to the search order for definitions in this VSO.
- void addToSearchOrder(VSO &V);
+ /// Add the given JITDylib to the search order for definitions in this
+ /// JITDylib.
+ void addToSearchOrder(JITDylib &JD, bool MatcNonExported = false);
- /// Replace OldV with NewV in the search order if OldV is present. Otherwise
- /// this operation is a no-op.
- void replaceInSearchOrder(VSO &OldV, VSO &NewV);
+ /// Replace OldJD with NewJD in the search order if OldJD is present.
+ /// Otherwise this operation is a no-op.
+ void replaceInSearchOrder(JITDylib &OldJD, JITDylib &NewJD,
+ bool MatchNonExported = false);
- /// Remove the given VSO from the search order for this VSO if it is
+ /// Remove the given JITDylib from the search order for this JITDylib if it is
/// present. Otherwise this operation is a no-op.
- void removeFromSearchOrder(VSO &V);
+ void removeFromSearchOrder(JITDylib &JD);
/// Do something with the search order (run under the session lock).
template <typename Func>
auto withSearchOrderDo(Func &&F)
- -> decltype(F(std::declval<const VSOList &>())) {
- return ES.runSessionLocked([&]() { return F(SearchOrder); });
- }
+ -> decltype(F(std::declval<const JITDylibSearchList &>()));
- /// Define all symbols provided by the materialization unit to be part
- /// of the given VSO.
- template <typename UniquePtrToMaterializationUnit>
- typename std::enable_if<
- std::is_convertible<
- typename std::decay<UniquePtrToMaterializationUnit>::type,
- std::unique_ptr<MaterializationUnit>>::value,
- Error>::type
- define(UniquePtrToMaterializationUnit &&MU) {
- return ES.runSessionLocked([&, this]() -> Error {
- assert(MU && "Can't define with a null MU");
-
- if (auto Err = defineImpl(*MU))
- return Err;
-
- /// defineImpl succeeded.
- auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU));
- for (auto &KV : UMI->MU->getSymbols())
- UnmaterializedInfos[KV.first] = UMI;
-
- return Error::success();
- });
- }
+ /// Define all symbols provided by the materialization unit to be part of this
+ /// JITDylib.
+ ///
+ /// This overload always takes ownership of the MaterializationUnit. If any
+  /// errors occur, the MaterializationUnit is consumed.
+ template <typename MaterializationUnitType>
+ Error define(std::unique_ptr<MaterializationUnitType> &&MU);
- /// Search the given VSO for the symbols in Symbols. If found, store
+ /// Define all symbols provided by the materialization unit to be part of this
+ /// JITDylib.
+ ///
+  /// This overload only takes ownership of the MaterializationUnit if no error is
+ /// generated. If an error occurs, ownership remains with the caller. This
+ /// may allow the caller to modify the MaterializationUnit to correct the
+ /// issue, then re-call define.
+ template <typename MaterializationUnitType>
+ Error define(std::unique_ptr<MaterializationUnitType> &MU);
+
+ /// Tries to remove the given symbols.
+ ///
+ /// If any symbols are not defined in this JITDylib this method will return
+ /// a SymbolsNotFound error covering the missing symbols.
+ ///
+ /// If all symbols are found but some symbols are in the process of being
+ /// materialized this method will return a SymbolsCouldNotBeRemoved error.
+ ///
+ /// On success, all symbols are removed. On failure, the JITDylib state is
+ /// left unmodified (no symbols are removed).
+ Error remove(const SymbolNameSet &Names);
+
+ /// Search the given JITDylib for the symbols in Symbols. If found, store
/// the flags for each symbol in Flags. Returns any unresolved symbols.
SymbolFlagsMap lookupFlags(const SymbolNameSet &Names);
- /// Dump current VSO state to OS.
+ /// Dump current JITDylib state to OS.
void dump(raw_ostream &OS);
/// FIXME: Remove this when we remove the old ORC layers.
- /// Search the given VSOs in order for the symbols in Symbols. Results
+ /// Search the given JITDylibs in order for the symbols in Symbols. Results
/// (once they become available) will be returned via the given Query.
///
/// If any symbol is not found then the unresolved symbols will be returned,
@@ -664,16 +624,16 @@ private:
};
using UnmaterializedInfosMap =
- std::map<SymbolStringPtr, std::shared_ptr<UnmaterializedInfo>>;
+ DenseMap<SymbolStringPtr, std::shared_ptr<UnmaterializedInfo>>;
struct MaterializingInfo {
AsynchronousSymbolQueryList PendingQueries;
SymbolDependenceMap Dependants;
- SymbolDependenceMap UnfinalizedDependencies;
- bool IsFinalized = false;
+ SymbolDependenceMap UnemittedDependencies;
+ bool IsEmitted = false;
};
- using MaterializingInfosMap = std::map<SymbolStringPtr, MaterializingInfo>;
+ using MaterializingInfosMap = DenseMap<SymbolStringPtr, MaterializingInfo>;
using LookupImplActionFlags = enum {
None = 0,
@@ -682,7 +642,7 @@ private:
LLVM_MARK_AS_BITMASK_ENUM(NotifyFullyReady)
};
- VSO(ExecutionSessionBase &ES, std::string Name);
+ JITDylib(ExecutionSession &ES, std::string Name);
Error defineImpl(MaterializationUnit &MU);
@@ -690,10 +650,12 @@ private:
const SymbolNameSet &Names);
void lodgeQuery(std::shared_ptr<AsynchronousSymbolQuery> &Q,
- SymbolNameSet &Unresolved, MaterializationUnitList &MUs);
+ SymbolNameSet &Unresolved, bool MatchNonExported,
+ MaterializationUnitList &MUs);
void lodgeQueryImpl(std::shared_ptr<AsynchronousSymbolQuery> &Q,
- SymbolNameSet &Unresolved, MaterializationUnitList &MUs);
+ SymbolNameSet &Unresolved, bool MatchNonExported,
+ MaterializationUnitList &MUs);
LookupImplActionFlags
lookupImpl(std::shared_ptr<AsynchronousSymbolQuery> &Q,
@@ -703,77 +665,266 @@ private:
void detachQueryHelper(AsynchronousSymbolQuery &Q,
const SymbolNameSet &QuerySymbols);
- void transferFinalizedNodeDependencies(MaterializingInfo &DependantMI,
- const SymbolStringPtr &DependantName,
- MaterializingInfo &FinalizedMI);
+ void transferEmittedNodeDependencies(MaterializingInfo &DependantMI,
+ const SymbolStringPtr &DependantName,
+ MaterializingInfo &EmittedMI);
Error defineMaterializing(const SymbolFlagsMap &SymbolFlags);
void replace(std::unique_ptr<MaterializationUnit> MU);
- SymbolNameSet getRequestedSymbols(const SymbolFlagsMap &SymbolFlags);
+ SymbolNameSet getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const;
void addDependencies(const SymbolStringPtr &Name,
const SymbolDependenceMap &Dependants);
void resolve(const SymbolMap &Resolved);
- void finalize(const SymbolFlagsMap &Finalized);
+ void emit(const SymbolFlagsMap &Emitted);
void notifyFailed(const SymbolNameSet &FailedSymbols);
- ExecutionSessionBase &ES;
- std::string VSOName;
+ ExecutionSession &ES;
+ std::string JITDylibName;
SymbolMap Symbols;
UnmaterializedInfosMap UnmaterializedInfos;
MaterializingInfosMap MaterializingInfos;
- FallbackDefinitionGeneratorFunction FallbackDefinitionGenerator;
- VSOList SearchOrder;
+ GeneratorFunction DefGenerator;
+ JITDylibSearchList SearchOrder;
};
/// An ExecutionSession represents a running JIT program.
-class ExecutionSession : public ExecutionSessionBase {
+class ExecutionSession {
+ // FIXME: Remove this when we remove the old ORC layers.
+ friend class JITDylib;
+
public:
+ /// For reporting errors.
using ErrorReporter = std::function<void(Error)>;
- using DispatchMaterializationFunction =
- std::function<void(VSO &V, std::unique_ptr<MaterializationUnit> MU)>;
+ /// For dispatching MaterializationUnit::materialize calls.
+ using DispatchMaterializationFunction = std::function<void(
+ JITDylib &JD, std::unique_ptr<MaterializationUnit> MU)>;
- /// Construct an ExecutionEngine.
+ /// Construct an ExecutionSession.
///
/// SymbolStringPools may be shared between ExecutionSessions.
- ExecutionSession(std::shared_ptr<SymbolStringPool> SSP = nullptr)
- : ExecutionSessionBase(std::move(SSP)) {}
+ ExecutionSession(std::shared_ptr<SymbolStringPool> SSP = nullptr);
+
+ /// Add a symbol name to the SymbolStringPool and return a pointer to it.
+ SymbolStringPtr intern(StringRef SymName) { return SSP->intern(SymName); }
+
+ /// Returns a shared_ptr to the SymbolStringPool for this ExecutionSession.
+ std::shared_ptr<SymbolStringPool> getSymbolStringPool() const { return SSP; }
+
+ /// Run the given lambda with the session mutex locked.
+ template <typename Func> auto runSessionLocked(Func &&F) -> decltype(F()) {
+ std::lock_guard<std::recursive_mutex> Lock(SessionMutex);
+ return F();
+ }
+
+ /// Get the "main" JITDylib, which is created automatically on construction of
+ /// the ExecutionSession.
+ JITDylib &getMainJITDylib();
+
+ /// Add a new JITDylib to this ExecutionSession.
+ JITDylib &createJITDylib(std::string Name,
+ bool AddToMainDylibSearchOrder = true);
+
+ /// Allocate a module key for a new module to add to the JIT.
+ VModuleKey allocateVModule() {
+ return runSessionLocked([this]() { return ++LastKey; });
+ }
+
+ /// Return a module key to the ExecutionSession so that it can be
+ /// re-used. This should only be done once all resources associated
+ /// with the original key have been released.
+ void releaseVModule(VModuleKey Key) { /* FIXME: Recycle keys */
+ }
+
+ /// Set the error reporter function.
+ ExecutionSession &setErrorReporter(ErrorReporter ReportError) {
+ this->ReportError = std::move(ReportError);
+ return *this;
+ }
+
+  /// Report an error for this execution session.
+ ///
+ /// Unhandled errors can be sent here to log them.
+ void reportError(Error Err) { ReportError(std::move(Err)); }
+
+ /// Set the materialization dispatch function.
+ ExecutionSession &setDispatchMaterialization(
+ DispatchMaterializationFunction DispatchMaterialization) {
+ this->DispatchMaterialization = std::move(DispatchMaterialization);
+ return *this;
+ }
+
+ void legacyFailQuery(AsynchronousSymbolQuery &Q, Error Err);
+
+ using LegacyAsyncLookupFunction = std::function<SymbolNameSet(
+ std::shared_ptr<AsynchronousSymbolQuery> Q, SymbolNameSet Names)>;
+
+ /// A legacy lookup function for JITSymbolResolverAdapter.
+ /// Do not use -- this will be removed soon.
+ Expected<SymbolMap>
+ legacyLookup(LegacyAsyncLookupFunction AsyncLookup, SymbolNameSet Names,
+ bool WaiUntilReady,
+ RegisterDependenciesFunction RegisterDependencies);
+
+ /// Search the given JITDylib list for the given symbols.
+ ///
+ /// SearchOrder lists the JITDylibs to search. For each dylib, the associated
+ /// boolean indicates whether the search should match against non-exported
+ /// (hidden visibility) symbols in that dylib (true means match against
+ /// non-exported symbols, false means do not match).
+ ///
+ /// The OnResolve callback will be called once all requested symbols are
+ /// resolved, or if an error occurs prior to resolution.
+ ///
+ /// The OnReady callback will be called once all requested symbols are ready,
+ /// or if an error occurs after resolution but before all symbols are ready.
+ ///
+ /// If all symbols are found, the RegisterDependencies function will be called
+ /// while the session lock is held. This gives clients a chance to register
+  /// dependencies on the queried symbols for any symbols they are
+ /// materializing (if a MaterializationResponsibility instance is present,
+ /// this can be implemented by calling
+ /// MaterializationResponsibility::addDependencies). If there are no
+  /// dependent symbols for this query (e.g. it is being made by a top level
+ /// client to get an address to call) then the value NoDependenciesToRegister
+ /// can be used.
+ void lookup(const JITDylibSearchList &SearchOrder, SymbolNameSet Symbols,
+ SymbolsResolvedCallback OnResolve, SymbolsReadyCallback OnReady,
+ RegisterDependenciesFunction RegisterDependencies);
- /// Add a new VSO to this ExecutionSession.
- VSO &createVSO(std::string Name);
+ /// Blocking version of lookup above. Returns the resolved symbol map.
+ /// If WaitUntilReady is true (the default), will not return until all
+ /// requested symbols are ready (or an error occurs). If WaitUntilReady is
+ /// false, will return as soon as all requested symbols are resolved,
+ /// or an error occurs. If WaitUntilReady is false and an error occurs
+ /// after resolution, the function will return a success value, but the
+ /// error will be reported via reportErrors.
+ Expected<SymbolMap> lookup(const JITDylibSearchList &SearchOrder,
+ const SymbolNameSet &Symbols,
+ RegisterDependenciesFunction RegisterDependencies =
+ NoDependenciesToRegister,
+ bool WaitUntilReady = true);
+
+ /// Convenience version of blocking lookup.
+ /// Searches each of the JITDylibs in the search order in turn for the given
+ /// symbol.
+ Expected<JITEvaluatedSymbol> lookup(const JITDylibSearchList &SearchOrder,
+ SymbolStringPtr Symbol);
+
+ /// Convenience version of blocking lookup.
+ /// Searches each of the JITDylibs in the search order in turn for the given
+ /// symbol. The search will not find non-exported symbols.
+ Expected<JITEvaluatedSymbol> lookup(ArrayRef<JITDylib *> SearchOrder,
+ SymbolStringPtr Symbol);
+
+ /// Convenience version of blocking lookup.
+ /// Searches each of the JITDylibs in the search order in turn for the given
+ /// symbol. The search will not find non-exported symbols.
+ Expected<JITEvaluatedSymbol> lookup(ArrayRef<JITDylib *> SearchOrder,
+ StringRef Symbol);
+
+ /// Materialize the given unit.
+ void dispatchMaterialization(JITDylib &JD,
+ std::unique_ptr<MaterializationUnit> MU) {
+ LLVM_DEBUG(runSessionLocked([&]() {
+ dbgs() << "Compiling, for " << JD.getName() << ", " << *MU
+ << "\n";
+ }););
+ DispatchMaterialization(JD, std::move(MU));
+ }
+
+ /// Dump the state of all the JITDylibs in this session.
+ void dump(raw_ostream &OS);
private:
- std::vector<std::unique_ptr<VSO>> VSOs;
+ static void logErrorsToStdErr(Error Err) {
+ logAllUnhandledErrors(std::move(Err), errs(), "JIT session error: ");
+ }
+
+ static void
+ materializeOnCurrentThread(JITDylib &JD,
+ std::unique_ptr<MaterializationUnit> MU) {
+ MU->doMaterialize(JD);
+ }
+
+ void runOutstandingMUs();
+
+ mutable std::recursive_mutex SessionMutex;
+ std::shared_ptr<SymbolStringPool> SSP;
+ VModuleKey LastKey = 0;
+ ErrorReporter ReportError = logErrorsToStdErr;
+ DispatchMaterializationFunction DispatchMaterialization =
+ materializeOnCurrentThread;
+
+ std::vector<std::unique_ptr<JITDylib>> JDs;
+
+ // FIXME: Remove this (and runOutstandingMUs) once the linking layer works
+ // with callbacks from asynchronous queries.
+ mutable std::recursive_mutex OutstandingMUsMutex;
+ std::vector<std::pair<JITDylib *, std::unique_ptr<MaterializationUnit>>>
+ OutstandingMUs;
};
-/// Look up the given names in the given VSOs.
-/// VSOs will be searched in order and no VSO pointer may be null.
-/// All symbols must be found within the given VSOs or an error
-/// will be returned.
-Expected<SymbolMap> lookup(const VSOList &VSOs, SymbolNameSet Names);
+template <typename Func>
+auto JITDylib::withSearchOrderDo(Func &&F)
+ -> decltype(F(std::declval<const JITDylibSearchList &>())) {
+ return ES.runSessionLocked([&]() { return F(SearchOrder); });
+}
+
+template <typename MaterializationUnitType>
+Error JITDylib::define(std::unique_ptr<MaterializationUnitType> &&MU) {
+ assert(MU && "Can not define with a null MU");
+ return ES.runSessionLocked([&, this]() -> Error {
+ if (auto Err = defineImpl(*MU))
+ return Err;
-/// Look up a symbol by searching a list of VSOs.
-Expected<JITEvaluatedSymbol> lookup(const VSOList &VSOs, SymbolStringPtr Name);
+ /// defineImpl succeeded.
+ auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU));
+ for (auto &KV : UMI->MU->getSymbols())
+ UnmaterializedInfos[KV.first] = UMI;
+
+ return Error::success();
+ });
+}
+
+template <typename MaterializationUnitType>
+Error JITDylib::define(std::unique_ptr<MaterializationUnitType> &MU) {
+ assert(MU && "Can not define with a null MU");
+
+ return ES.runSessionLocked([&, this]() -> Error {
+ if (auto Err = defineImpl(*MU))
+ return Err;
+
+ /// defineImpl succeeded.
+ auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU));
+ for (auto &KV : UMI->MU->getSymbols())
+ UnmaterializedInfos[KV.first] = UMI;
+
+ return Error::success();
+ });
+}
/// Mangles symbol names then uniques them in the context of an
/// ExecutionSession.
class MangleAndInterner {
public:
- MangleAndInterner(ExecutionSessionBase &ES, const DataLayout &DL);
+ MangleAndInterner(ExecutionSession &ES, const DataLayout &DL);
SymbolStringPtr operator()(StringRef Name);
private:
- ExecutionSessionBase &ES;
+ ExecutionSession &ES;
const DataLayout &DL;
};
} // End namespace orc
} // End namespace llvm
+#undef DEBUG_TYPE // "orc"
+
#endif // LLVM_EXECUTIONENGINE_ORC_CORE_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index e27f6e1e2cd6..88559f822e5d 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -21,7 +21,6 @@
#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cstdint>
#include <string>
@@ -39,45 +38,6 @@ class Value;
namespace orc {
-/// A utility class for building TargetMachines for JITs.
-class JITTargetMachineBuilder {
-public:
- JITTargetMachineBuilder(Triple TT);
- static Expected<JITTargetMachineBuilder> detectHost();
- Expected<std::unique_ptr<TargetMachine>> createTargetMachine();
-
- JITTargetMachineBuilder &setArch(std::string Arch) {
- this->Arch = std::move(Arch);
- return *this;
- }
- JITTargetMachineBuilder &setCPU(std::string CPU) {
- this->CPU = std::move(CPU);
- return *this;
- }
- JITTargetMachineBuilder &setRelocationModel(Optional<Reloc::Model> RM) {
- this->RM = std::move(RM);
- return *this;
- }
- JITTargetMachineBuilder &setCodeModel(Optional<CodeModel::Model> CM) {
- this->CM = std::move(CM);
- return *this;
- }
- JITTargetMachineBuilder &
- addFeatures(const std::vector<std::string> &FeatureVec);
- SubtargetFeatures &getFeatures() { return Features; }
- TargetOptions &getOptions() { return Options; }
-
-private:
- Triple TT;
- std::string Arch;
- std::string CPU;
- SubtargetFeatures Features;
- TargetOptions Options;
- Optional<Reloc::Model> RM;
- Optional<CodeModel::Model> CM;
- CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
-};
-
/// This iterator provides a convenient way to iterate over the elements
/// of an llvm.global_ctors/llvm.global_dtors instance.
///
@@ -134,11 +94,11 @@ iterator_range<CtorDtorIterator> getDestructors(const Module &M);
/// Convenience class for recording constructor/destructor names for
/// later execution.
template <typename JITLayerT>
-class CtorDtorRunner {
+class LegacyCtorDtorRunner {
public:
/// Construct a CtorDtorRunner for the given range using the given
/// name mangling function.
- CtorDtorRunner(std::vector<std::string> CtorDtorNames, VModuleKey K)
+ LegacyCtorDtorRunner(std::vector<std::string> CtorDtorNames, VModuleKey K)
: CtorDtorNames(std::move(CtorDtorNames)), K(K) {}
/// Run the recorded constructors/destructors through the given JIT
@@ -169,9 +129,9 @@ private:
orc::VModuleKey K;
};
-class CtorDtorRunner2 {
+class CtorDtorRunner {
public:
- CtorDtorRunner2(VSO &V) : V(V) {}
+ CtorDtorRunner(JITDylib &JD) : JD(JD) {}
void add(iterator_range<CtorDtorIterator> CtorDtors);
Error run();
@@ -179,7 +139,7 @@ private:
using CtorDtorList = std::vector<SymbolStringPtr>;
using CtorDtorPriorityMap = std::map<unsigned, CtorDtorList>;
- VSO &V;
+ JITDylib &JD;
CtorDtorPriorityMap CtorDtorsByPriority;
};
@@ -217,11 +177,11 @@ protected:
void *DSOHandle);
};
-class LocalCXXRuntimeOverrides : public LocalCXXRuntimeOverridesBase {
+class LegacyLocalCXXRuntimeOverrides : public LocalCXXRuntimeOverridesBase {
public:
/// Create a runtime-overrides class.
template <typename MangleFtorT>
- LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
+ LegacyLocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
}
@@ -242,22 +202,44 @@ private:
StringMap<JITTargetAddress> CXXRuntimeOverrides;
};
-class LocalCXXRuntimeOverrides2 : public LocalCXXRuntimeOverridesBase {
+class LocalCXXRuntimeOverrides : public LocalCXXRuntimeOverridesBase {
public:
- Error enable(VSO &V, MangleAndInterner &Mangler);
+ Error enable(JITDylib &JD, MangleAndInterner &Mangler);
};
/// A utility class to expose symbols found via dlsym to the JIT.
///
-/// If an instance of this class is attached to a VSO as a fallback definition
-/// generator, then any symbol found in the given DynamicLibrary that passes
-/// the 'Allow' predicate will be added to the VSO.
-class DynamicLibraryFallbackGenerator {
+/// If an instance of this class is attached to a JITDylib as a fallback
+/// definition generator, then any symbol found in the given DynamicLibrary that
+/// passes the 'Allow' predicate will be added to the JITDylib.
+class DynamicLibrarySearchGenerator {
public:
using SymbolPredicate = std::function<bool(SymbolStringPtr)>;
- DynamicLibraryFallbackGenerator(sys::DynamicLibrary Dylib,
- const DataLayout &DL, SymbolPredicate Allow);
- SymbolNameSet operator()(VSO &V, const SymbolNameSet &Names);
+
+ /// Create a DynamicLibrarySearchGenerator that searches for symbols in the
+ /// given sys::DynamicLibrary.
+ /// If the Allow predicate is given then only symbols matching the predicate
+ /// will be searched for in the DynamicLibrary. If the predicate is not given
+ /// then all symbols will be searched for.
+ DynamicLibrarySearchGenerator(sys::DynamicLibrary Dylib, const DataLayout &DL,
+ SymbolPredicate Allow = SymbolPredicate());
+
+ /// Permanently loads the library at the given path and, on success, returns
+ /// a DynamicLibrarySearchGenerator that will search it for symbol definitions
+ /// in the library. On failure returns the reason the library failed to load.
+ static Expected<DynamicLibrarySearchGenerator>
+ Load(const char *FileName, const DataLayout &DL,
+ SymbolPredicate Allow = SymbolPredicate());
+
+ /// Creates a DynamicLibrarySearchGenerator that searches for symbols in
+ /// the current process.
+ static Expected<DynamicLibrarySearchGenerator>
+ GetForCurrentProcess(const DataLayout &DL,
+ SymbolPredicate Allow = SymbolPredicate()) {
+ return Load(nullptr, DL, std::move(Allow));
+ }
+
+ SymbolNameSet operator()(JITDylib &JD, const SymbolNameSet &Names);
private:
sys::DynamicLibrary Dylib;
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
index ad6481548d59..30d71e69cd70 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -28,21 +28,20 @@ class Module;
namespace orc {
-class IRCompileLayer2 : public IRLayer {
+class IRCompileLayer : public IRLayer {
public:
using CompileFunction =
std::function<Expected<std::unique_ptr<MemoryBuffer>>(Module &)>;
using NotifyCompiledFunction =
- std::function<void(VModuleKey K, std::unique_ptr<Module>)>;
+ std::function<void(VModuleKey K, ThreadSafeModule TSM)>;
- IRCompileLayer2(ExecutionSession &ES, ObjectLayer &BaseLayer,
- CompileFunction Compile);
+ IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ CompileFunction Compile);
void setNotifyCompiled(NotifyCompiledFunction NotifyCompiled);
- void emit(MaterializationResponsibility R, VModuleKey K,
- std::unique_ptr<Module> M) override;
+ void emit(MaterializationResponsibility R, ThreadSafeModule TSM) override;
private:
mutable std::mutex IRLayerMutex;
@@ -57,15 +56,15 @@ private:
/// object file and adds this module file to the layer below, which must
/// implement the object layer concept.
template <typename BaseLayerT, typename CompileFtor>
-class IRCompileLayer {
+class LegacyIRCompileLayer {
public:
/// Callback type for notifications when modules are compiled.
using NotifyCompiledCallback =
std::function<void(VModuleKey K, std::unique_ptr<Module>)>;
- /// Construct an IRCompileLayer with the given BaseLayer, which must
+ /// Construct an LegacyIRCompileLayer with the given BaseLayer, which must
/// implement the ObjectLayer concept.
- IRCompileLayer(
+ LegacyIRCompileLayer(
BaseLayerT &BaseLayer, CompileFtor Compile,
NotifyCompiledCallback NotifyCompiled = NotifyCompiledCallback())
: BaseLayer(BaseLayer), Compile(std::move(Compile)),
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
index 266a0f45b3e4..49e65b9f2a80 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -23,24 +23,24 @@ namespace llvm {
class Module;
namespace orc {
-class IRTransformLayer2 : public IRLayer {
+class IRTransformLayer : public IRLayer {
public:
+ using TransformFunction = std::function<Expected<ThreadSafeModule>(
+ ThreadSafeModule, const MaterializationResponsibility &R)>;
- using TransformFunction =
- std::function<Expected<std::unique_ptr<Module>>(std::unique_ptr<Module>)>;
-
- IRTransformLayer2(ExecutionSession &ES, IRLayer &BaseLayer,
- TransformFunction Transform = identityTransform);
+ IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+ TransformFunction Transform = identityTransform);
void setTransform(TransformFunction Transform) {
this->Transform = std::move(Transform);
}
- void emit(MaterializationResponsibility R, VModuleKey K,
- std::unique_ptr<Module> M) override;
+ void emit(MaterializationResponsibility R, ThreadSafeModule TSM) override;
- static std::unique_ptr<Module> identityTransform(std::unique_ptr<Module> M) {
- return M;
+ static ThreadSafeModule
+ identityTransform(ThreadSafeModule TSM,
+ const MaterializationResponsibility &R) {
+ return TSM;
}
private:
@@ -53,11 +53,11 @@ private:
/// This layer applies a user supplied transform to each module that is added,
/// then adds the transformed module to the layer below.
template <typename BaseLayerT, typename TransformFtor>
-class IRTransformLayer {
+class LegacyIRTransformLayer {
public:
- /// Construct an IRTransformLayer with the given BaseLayer
- IRTransformLayer(BaseLayerT &BaseLayer,
+ /// Construct an LegacyIRTransformLayer with the given BaseLayer
+ LegacyIRTransformLayer(BaseLayerT &BaseLayer,
TransformFtor Transform = TransformFtor())
: BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
index 8b0b3fdb7df4..c2527802f6a7 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -47,92 +47,101 @@ class Value;
namespace orc {
-/// Target-independent base class for compile callback management.
-class JITCompileCallbackManager {
+/// Base class for pools of compiler re-entry trampolines.
+/// These trampolines are callable addresses that save all register state
+/// before calling a supplied function to return the trampoline landing
+/// address, then restore all state before jumping to that address. They
+/// are used by various ORC APIs to support lazy compilation.
+class TrampolinePool {
public:
- using CompileFunction = std::function<JITTargetAddress()>;
+ virtual ~TrampolinePool() {}
- /// Construct a JITCompileCallbackManager.
- /// @param ErrorHandlerAddress The address of an error handler in the target
- /// process to be used if a compile callback fails.
- JITCompileCallbackManager(ExecutionSession &ES,
- JITTargetAddress ErrorHandlerAddress)
- : ES(ES), CallbacksVSO(ES.createVSO("<Callbacks>")),
- ErrorHandlerAddress(ErrorHandlerAddress) {}
-
- virtual ~JITCompileCallbackManager() = default;
-
- /// Reserve a compile callback.
- Expected<JITTargetAddress> getCompileCallback(CompileFunction Compile);
+ /// Get an available trampoline address.
+ /// Returns an error if no trampoline can be created.
+ virtual Expected<JITTargetAddress> getTrampoline() = 0;
- /// Execute the callback for the given trampoline id. Called by the JIT
- /// to compile functions on demand.
- JITTargetAddress executeCompileCallback(JITTargetAddress TrampolineAddr);
+private:
+ virtual void anchor();
+};
-protected:
- std::vector<JITTargetAddress> AvailableTrampolines;
+/// A trampoline pool for trampolines within the current process.
+template <typename ORCABI> class LocalTrampolinePool : public TrampolinePool {
+public:
+ using GetTrampolineLandingFunction =
+ std::function<JITTargetAddress(JITTargetAddress TrampolineAddr)>;
+
+ /// Creates a LocalTrampolinePool with the given RunCallback function.
+ /// Returns an error if this function is unable to correctly allocate, write
+ /// and protect the resolver code block.
+ static Expected<std::unique_ptr<LocalTrampolinePool>>
+ Create(GetTrampolineLandingFunction GetTrampolineLanding) {
+ Error Err = Error::success();
+
+ auto LTP = std::unique_ptr<LocalTrampolinePool>(
+ new LocalTrampolinePool(std::move(GetTrampolineLanding), Err));
+
+ if (Err)
+ return std::move(Err);
+ return std::move(LTP);
+ }
-private:
- Expected<JITTargetAddress> getAvailableTrampolineAddr() {
- if (this->AvailableTrampolines.empty())
+ /// Get a free trampoline. Returns an error if one cannot be provided (e.g.
+ /// because the pool is empty and cannot be grown).
+ Expected<JITTargetAddress> getTrampoline() override {
+ std::lock_guard<std::mutex> Lock(LTPMutex);
+ if (AvailableTrampolines.empty()) {
if (auto Err = grow())
return std::move(Err);
- assert(!this->AvailableTrampolines.empty() &&
- "Failed to grow available trampolines.");
- JITTargetAddress TrampolineAddr = this->AvailableTrampolines.back();
- this->AvailableTrampolines.pop_back();
+ }
+ assert(!AvailableTrampolines.empty() && "Failed to grow trampoline pool");
+ auto TrampolineAddr = AvailableTrampolines.back();
+ AvailableTrampolines.pop_back();
return TrampolineAddr;
}
- // Create new trampolines - to be implemented in subclasses.
- virtual Error grow() = 0;
+ /// Returns the given trampoline to the pool for re-use.
+ void releaseTrampoline(JITTargetAddress TrampolineAddr) {
+ std::lock_guard<std::mutex> Lock(LTPMutex);
+ AvailableTrampolines.push_back(TrampolineAddr);
+ }
- virtual void anchor();
+private:
+ static JITTargetAddress reenter(void *TrampolinePoolPtr, void *TrampolineId) {
+ LocalTrampolinePool<ORCABI> *TrampolinePool =
+ static_cast<LocalTrampolinePool *>(TrampolinePoolPtr);
+ return TrampolinePool->GetTrampolineLanding(static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineId)));
+ }
- std::mutex CCMgrMutex;
- ExecutionSession &ES;
- VSO &CallbacksVSO;
- JITTargetAddress ErrorHandlerAddress;
- std::map<JITTargetAddress, SymbolStringPtr> AddrToSymbol;
- size_t NextCallbackId = 0;
-};
+ LocalTrampolinePool(GetTrampolineLandingFunction GetTrampolineLanding,
+ Error &Err)
+ : GetTrampolineLanding(std::move(GetTrampolineLanding)) {
-/// Manage compile callbacks for in-process JITs.
-template <typename TargetT>
-class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
-public:
- /// Construct a InProcessJITCompileCallbackManager.
- /// @param ErrorHandlerAddress The address of an error handler in the target
- /// process to be used if a compile callback fails.
- LocalJITCompileCallbackManager(ExecutionSession &ES,
- JITTargetAddress ErrorHandlerAddress)
- : JITCompileCallbackManager(ES, ErrorHandlerAddress) {
- /// Set up the resolver block.
+ ErrorAsOutParameter _(&Err);
+
+ /// Try to set up the resolver block.
std::error_code EC;
ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
- TargetT::ResolverCodeSize, nullptr,
+ ORCABI::ResolverCodeSize, nullptr,
sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
- assert(!EC && "Failed to allocate resolver block");
+ if (EC) {
+ Err = errorCodeToError(EC);
+ return;
+ }
- TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
- &reenter, this);
+ ORCABI::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+ &reenter, this);
EC = sys::Memory::protectMappedMemory(ResolverBlock.getMemoryBlock(),
sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
- assert(!EC && "Failed to mprotect resolver block");
+ if (EC) {
+ Err = errorCodeToError(EC);
+ return;
+ }
}
-private:
- static JITTargetAddress reenter(void *CCMgr, void *TrampolineId) {
- JITCompileCallbackManager *Mgr =
- static_cast<JITCompileCallbackManager *>(CCMgr);
- return Mgr->executeCompileCallback(
- static_cast<JITTargetAddress>(
- reinterpret_cast<uintptr_t>(TrampolineId)));
- }
-
- Error grow() override {
+ Error grow() {
assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
std::error_code EC;
@@ -144,17 +153,17 @@ private:
return errorCodeToError(EC);
unsigned NumTrampolines =
- (sys::Process::getPageSize() - TargetT::PointerSize) /
- TargetT::TrampolineSize;
+ (sys::Process::getPageSize() - ORCABI::PointerSize) /
+ ORCABI::TrampolineSize;
uint8_t *TrampolineMem = static_cast<uint8_t *>(TrampolineBlock.base());
- TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
- NumTrampolines);
+ ORCABI::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+ NumTrampolines);
for (unsigned I = 0; I < NumTrampolines; ++I)
this->AvailableTrampolines.push_back(
static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(
- TrampolineMem + (I * TargetT::TrampolineSize))));
+ TrampolineMem + (I * ORCABI::TrampolineSize))));
if (auto EC = sys::Memory::protectMappedMemory(
TrampolineBlock.getMemoryBlock(),
@@ -165,8 +174,87 @@ private:
return Error::success();
}
+ GetTrampolineLandingFunction GetTrampolineLanding;
+
+ std::mutex LTPMutex;
sys::OwningMemoryBlock ResolverBlock;
std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+ std::vector<JITTargetAddress> AvailableTrampolines;
+};
+
+/// Target-independent base class for compile callback management.
+class JITCompileCallbackManager {
+public:
+ using CompileFunction = std::function<JITTargetAddress()>;
+
+ virtual ~JITCompileCallbackManager() = default;
+
+ /// Reserve a compile callback.
+ Expected<JITTargetAddress> getCompileCallback(CompileFunction Compile);
+
+ /// Execute the callback for the given trampoline id. Called by the JIT
+ /// to compile functions on demand.
+ JITTargetAddress executeCompileCallback(JITTargetAddress TrampolineAddr);
+
+protected:
+ /// Construct a JITCompileCallbackManager.
+ JITCompileCallbackManager(std::unique_ptr<TrampolinePool> TP,
+ ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress)
+ : TP(std::move(TP)), ES(ES),
+ CallbacksJD(ES.createJITDylib("<Callbacks>")),
+ ErrorHandlerAddress(ErrorHandlerAddress) {}
+
+ void setTrampolinePool(std::unique_ptr<TrampolinePool> TP) {
+ this->TP = std::move(TP);
+ }
+
+private:
+ std::mutex CCMgrMutex;
+ std::unique_ptr<TrampolinePool> TP;
+ ExecutionSession &ES;
+ JITDylib &CallbacksJD;
+ JITTargetAddress ErrorHandlerAddress;
+ std::map<JITTargetAddress, SymbolStringPtr> AddrToSymbol;
+ size_t NextCallbackId = 0;
+};
+
+/// Manage compile callbacks for in-process JITs.
+template <typename ORCABI>
+class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
+public:
+ /// Create a new LocalJITCompileCallbackManager.
+ static Expected<std::unique_ptr<LocalJITCompileCallbackManager>>
+ Create(ExecutionSession &ES, JITTargetAddress ErrorHandlerAddress) {
+ Error Err = Error::success();
+ auto CCMgr = std::unique_ptr<LocalJITCompileCallbackManager>(
+ new LocalJITCompileCallbackManager(ES, ErrorHandlerAddress, Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(CCMgr);
+ }
+
+private:
+ /// Construct an InProcessJITCompileCallbackManager.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ LocalJITCompileCallbackManager(ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress,
+ Error &Err)
+ : JITCompileCallbackManager(nullptr, ES, ErrorHandlerAddress) {
+ ErrorAsOutParameter _(&Err);
+ auto TP = LocalTrampolinePool<ORCABI>::Create(
+ [this](JITTargetAddress TrampolineAddr) {
+ return executeCompileCallback(TrampolineAddr);
+ });
+
+ if (!TP) {
+ Err = TP.takeError();
+ return;
+ }
+
+ setTrampolinePool(std::move(*TP));
+ }
};
/// Base class for managing collections of named indirect stubs.
@@ -207,6 +295,7 @@ class LocalIndirectStubsManager : public IndirectStubsManager {
public:
Error createStub(StringRef StubName, JITTargetAddress StubAddr,
JITSymbolFlags StubFlags) override {
+ std::lock_guard<std::mutex> Lock(StubsMutex);
if (auto Err = reserveStubs(1))
return Err;
@@ -216,6 +305,7 @@ public:
}
Error createStubs(const StubInitsMap &StubInits) override {
+ std::lock_guard<std::mutex> Lock(StubsMutex);
if (auto Err = reserveStubs(StubInits.size()))
return Err;
@@ -227,6 +317,7 @@ public:
}
JITEvaluatedSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+ std::lock_guard<std::mutex> Lock(StubsMutex);
auto I = StubIndexes.find(Name);
if (I == StubIndexes.end())
return nullptr;
@@ -242,6 +333,7 @@ public:
}
JITEvaluatedSymbol findPointer(StringRef Name) override {
+ std::lock_guard<std::mutex> Lock(StubsMutex);
auto I = StubIndexes.find(Name);
if (I == StubIndexes.end())
return nullptr;
@@ -254,11 +346,15 @@ public:
}
Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
+ using AtomicIntPtr = std::atomic<uintptr_t>;
+
+ std::lock_guard<std::mutex> Lock(StubsMutex);
auto I = StubIndexes.find(Name);
assert(I != StubIndexes.end() && "No stub pointer for symbol");
auto Key = I->second.first;
- *IndirectStubsInfos[Key.first].getPtr(Key.second) =
- reinterpret_cast<void *>(static_cast<uintptr_t>(NewAddr));
+ AtomicIntPtr *AtomicStubPtr = reinterpret_cast<AtomicIntPtr *>(
+ IndirectStubsInfos[Key.first].getPtr(Key.second));
+ *AtomicStubPtr = static_cast<uintptr_t>(NewAddr);
return Error::success();
}
@@ -288,6 +384,7 @@ private:
StubIndexes[StubName] = std::make_pair(Key, StubFlags);
}
+ std::mutex StubsMutex;
std::vector<typename TargetT::IndirectStubsInfo> IndirectStubsInfos;
using StubKey = std::pair<uint16_t, uint16_t>;
std::vector<StubKey> FreeStubs;
@@ -299,7 +396,7 @@ private:
/// The given target triple will determine the ABI, and the given
/// ErrorHandlerAddress will be used by the resulting compile callback
/// manager if a compile callback fails.
-std::unique_ptr<JITCompileCallbackManager>
+Expected<std::unique_ptr<JITCompileCallbackManager>>
createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
JITTargetAddress ErrorHandlerAddress);
@@ -325,12 +422,18 @@ GlobalVariable *createImplPointer(PointerType &PT, Module &M, const Twine &Name,
/// indirect call using the given function pointer.
void makeStub(Function &F, Value &ImplPointer);
-/// Raise linkage types and rename as necessary to ensure that all
-/// symbols are accessible for other modules.
-///
-/// This should be called before partitioning a module to ensure that the
-/// partitions retain access to each other's symbols.
-void makeAllSymbolsExternallyAccessible(Module &M);
+/// Promotes private symbols to global hidden, and renames to prevent clashes
+/// with other promoted symbols. The same SymbolLinkagePromoter instance should
+/// be used for all symbols to be added to a single JITDylib.
+class SymbolLinkagePromoter {
+public:
+ /// Promote symbols in the given module. Returns the set of global values
+ /// that have been renamed/promoted.
+ std::vector<GlobalValue *> operator()(Module &M);
+
+private:
+ unsigned NextId = 0;
+};
/// Clone a function declaration into a new module.
///
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h
new file mode 100644
index 000000000000..eb9b6bf2dea6
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h
@@ -0,0 +1,130 @@
+//===- JITTargetMachineBuilder.h - Build TargetMachines for JIT -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility for building TargetMachines for JITs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H
+#define LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+/// A utility class for building TargetMachines for JITs.
+class JITTargetMachineBuilder {
+public:
+ /// Create a JITTargetMachineBuilder based on the given triple.
+ ///
+ /// Note: TargetOptions is default-constructed, then EmulatedTLS and
+ /// ExplicitEmulatedTLS are set to true. If EmulatedTLS is not
+ /// required, these values should be reset before calling
+ /// createTargetMachine.
+ JITTargetMachineBuilder(Triple TT);
+
+ /// Create a JITTargetMachineBuilder for the host system.
+ ///
+ /// Note: TargetOptions is default-constructed, then EmulatedTLS and
+ /// ExplicitEmulatedTLS are set to true. If EmulatedTLS is not
+ /// required, these values should be reset before calling
+ /// createTargetMachine.
+ static Expected<JITTargetMachineBuilder> detectHost();
+
+ /// Create a TargetMachine.
+ ///
+ /// This operation will fail if the requested target is not registered,
+ /// in which case see llvm/Support/TargetSelect.h. To JIT IR the Target and
+ /// the target's AsmPrinter must both be registered. To JIT assembly
+ /// (including inline and module level assembly) the target's AsmParser must
+ /// also be registered.
+ Expected<std::unique_ptr<TargetMachine>> createTargetMachine();
+
+ /// Get the default DataLayout for the target.
+ ///
+ /// Note: This is reasonably expensive, as it creates a temporary
+ /// TargetMachine instance under the hood. It is only suitable for use during
+ /// JIT setup.
+ Expected<DataLayout> getDefaultDataLayoutForTarget() {
+ auto TM = createTargetMachine();
+ if (!TM)
+ return TM.takeError();
+ return (*TM)->createDataLayout();
+ }
+
+ /// Set the CPU string.
+ JITTargetMachineBuilder &setCPU(std::string CPU) {
+ this->CPU = std::move(CPU);
+ return *this;
+ }
+
+ /// Set the relocation model.
+ JITTargetMachineBuilder &setRelocationModel(Optional<Reloc::Model> RM) {
+ this->RM = std::move(RM);
+ return *this;
+ }
+
+ /// Set the code model.
+ JITTargetMachineBuilder &setCodeModel(Optional<CodeModel::Model> CM) {
+ this->CM = std::move(CM);
+ return *this;
+ }
+
+ /// Set the LLVM CodeGen optimization level.
+ JITTargetMachineBuilder &setCodeGenOptLevel(CodeGenOpt::Level OptLevel) {
+ this->OptLevel = OptLevel;
+ return *this;
+ }
+
+ /// Add subtarget features.
+ JITTargetMachineBuilder &
+ addFeatures(const std::vector<std::string> &FeatureVec);
+
+ /// Access subtarget features.
+ SubtargetFeatures &getFeatures() { return Features; }
+
+ /// Access subtarget features.
+ const SubtargetFeatures &getFeatures() const { return Features; }
+
+ /// Access TargetOptions.
+ TargetOptions &getOptions() { return Options; }
+
+ /// Access TargetOptions.
+ const TargetOptions &getOptions() const { return Options; }
+
+ /// Access Triple.
+ Triple &getTargetTriple() { return TT; }
+
+ /// Access Triple.
+ const Triple &getTargetTriple() const { return TT; }
+
+private:
+ Triple TT;
+ std::string CPU;
+ SubtargetFeatures Features;
+ TargetOptions Options;
+ Optional<Reloc::Model> RM;
+ Optional<CodeModel::Model> CM;
+ CodeGenOpt::Level OptLevel = CodeGenOpt::None;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
index df655bd82006..ce3e5d519c73 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
@@ -19,9 +19,11 @@
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
+#include "llvm/Support/ThreadPool.h"
namespace llvm {
namespace orc {
@@ -29,44 +31,68 @@ namespace orc {
/// A pre-fabricated ORC JIT stack that can serve as an alternative to MCJIT.
class LLJIT {
public:
+
+ /// Destruct this instance. If a multi-threaded instance, waits for all
+ /// compile threads to complete.
+ ~LLJIT();
+
/// Create an LLJIT instance.
+ /// If NumCompileThreads is not equal to zero, creates a multi-threaded
+ /// LLJIT with the given number of compile threads.
static Expected<std::unique_ptr<LLJIT>>
- Create(std::unique_ptr<ExecutionSession> ES,
- std::unique_ptr<TargetMachine> TM, DataLayout DL);
+ Create(JITTargetMachineBuilder JTMB, DataLayout DL,
+ unsigned NumCompileThreads = 0);
- /// Returns a reference to the ExecutionSession for this JIT instance.
+ /// Returns the ExecutionSession for this instance.
ExecutionSession &getExecutionSession() { return *ES; }
- /// Returns a reference to the VSO representing the JIT'd main program.
- VSO &getMainVSO() { return Main; }
+ /// Returns a reference to the JITDylib representing the JIT'd main program.
+ JITDylib &getMainJITDylib() { return Main; }
+
+ /// Create a new JITDylib with the given name and return a reference to it.
+ JITDylib &createJITDylib(std::string Name) {
+ return ES->createJITDylib(std::move(Name));
+ }
/// Convenience method for defining an absolute symbol.
Error defineAbsolute(StringRef Name, JITEvaluatedSymbol Address);
- /// Adds an IR module to the given VSO.
- Error addIRModule(VSO &V, std::unique_ptr<Module> M);
+ /// Convenience method for defining an
- /// Adds an IR module to the Main VSO.
- Error addIRModule(std::unique_ptr<Module> M) {
- return addIRModule(Main, std::move(M));
+ /// Adds an IR module to the given JITDylib.
+ Error addIRModule(JITDylib &JD, ThreadSafeModule TSM);
+
+ /// Adds an IR module to the Main JITDylib.
+ Error addIRModule(ThreadSafeModule TSM) {
+ return addIRModule(Main, std::move(TSM));
}
- /// Look up a symbol in VSO V by the symbol's linker-mangled name (to look up
- /// symbols based on their IR name use the lookup function instead).
- Expected<JITEvaluatedSymbol> lookupLinkerMangled(VSO &V, StringRef Name);
+ /// Adds an object file to the given JITDylib.
+ Error addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj);
+
+ /// Adds an object file to the given JITDylib.
+ Error addObjectFile(std::unique_ptr<MemoryBuffer> Obj) {
+ return addObjectFile(Main, std::move(Obj));
+ }
- /// Look up a symbol in the main VSO by the symbol's linker-mangled name (to
+ /// Look up a symbol in JITDylib JD by the symbol's linker-mangled name (to
/// look up symbols based on their IR name use the lookup function instead).
+ Expected<JITEvaluatedSymbol> lookupLinkerMangled(JITDylib &JD,
+ StringRef Name);
+
+ /// Look up a symbol in the main JITDylib by the symbol's linker-mangled name
+ /// (to look up symbols based on their IR name use the lookup function
+ /// instead).
Expected<JITEvaluatedSymbol> lookupLinkerMangled(StringRef Name) {
return lookupLinkerMangled(Main, Name);
}
- /// Look up a symbol in VSO V based on its IR symbol name.
- Expected<JITEvaluatedSymbol> lookup(VSO &V, StringRef UnmangledName) {
- return lookupLinkerMangled(V, mangle(UnmangledName));
+ /// Look up a symbol in JITDylib JD based on its IR symbol name.
+ Expected<JITEvaluatedSymbol> lookup(JITDylib &JD, StringRef UnmangledName) {
+ return lookupLinkerMangled(JD, mangle(UnmangledName));
}
- /// Look up a symbol in the main VSO based on its IR symbol name.
+ /// Look up a symbol in the main JITDylib based on its IR symbol name.
Expected<JITEvaluatedSymbol> lookup(StringRef UnmangledName) {
return lookup(Main, UnmangledName);
}
@@ -77,11 +103,18 @@ public:
/// Runs all not-yet-run static destructors.
Error runDestructors() { return DtorRunner.run(); }
+ /// Returns a reference to the ObjLinkingLayer
+ RTDyldObjectLinkingLayer &getObjLinkingLayer() { return ObjLinkingLayer; }
+
protected:
+
+ /// Create an LLJIT instance with a single compile thread.
LLJIT(std::unique_ptr<ExecutionSession> ES, std::unique_ptr<TargetMachine> TM,
DataLayout DL);
- std::shared_ptr<RuntimeDyld::MemoryManager> getMemoryManager(VModuleKey K);
+ /// Create an LLJIT instance with multiple compile threads.
+ LLJIT(std::unique_ptr<ExecutionSession> ES, JITTargetMachineBuilder JTMB,
+ DataLayout DL, unsigned NumCompileThreads);
std::string mangle(StringRef UnmangledName);
@@ -90,51 +123,68 @@ protected:
void recordCtorDtors(Module &M);
std::unique_ptr<ExecutionSession> ES;
- VSO &Main;
+ JITDylib &Main;
- std::unique_ptr<TargetMachine> TM;
DataLayout DL;
+ std::unique_ptr<ThreadPool> CompileThreads;
- RTDyldObjectLinkingLayer2 ObjLinkingLayer;
- IRCompileLayer2 CompileLayer;
+ RTDyldObjectLinkingLayer ObjLinkingLayer;
+ IRCompileLayer CompileLayer;
- CtorDtorRunner2 CtorRunner, DtorRunner;
+ CtorDtorRunner CtorRunner, DtorRunner;
};
/// An extended version of LLJIT that supports lazy function-at-a-time
/// compilation of LLVM IR.
class LLLazyJIT : public LLJIT {
public:
+
/// Create an LLLazyJIT instance.
+ /// If NumCompileThreads is not equal to zero, creates a multi-threaded
+ /// LLLazyJIT with the given number of compile threads.
static Expected<std::unique_ptr<LLLazyJIT>>
- Create(std::unique_ptr<ExecutionSession> ES,
- std::unique_ptr<TargetMachine> TM, DataLayout DL, LLVMContext &Ctx);
+ Create(JITTargetMachineBuilder JTMB, DataLayout DL,
+ JITTargetAddress ErrorAddr, unsigned NumCompileThreads = 0);
/// Set an IR transform (e.g. pass manager pipeline) to run on each function
/// when it is compiled.
- void setLazyCompileTransform(IRTransformLayer2::TransformFunction Transform) {
+ void setLazyCompileTransform(IRTransformLayer::TransformFunction Transform) {
TransformLayer.setTransform(std::move(Transform));
}
- /// Add a module to be lazily compiled to VSO V.
- Error addLazyIRModule(VSO &V, std::unique_ptr<Module> M);
+ /// Sets the partition function.
+ void
+ setPartitionFunction(CompileOnDemandLayer::PartitionFunction Partition) {
+ CODLayer.setPartitionFunction(std::move(Partition));
+ }
+
+ /// Add a module to be lazily compiled to JITDylib JD.
+ Error addLazyIRModule(JITDylib &JD, ThreadSafeModule M);
- /// Add a module to be lazily compiled to the main VSO.
- Error addLazyIRModule(std::unique_ptr<Module> M) {
+ /// Add a module to be lazily compiled to the main JITDylib.
+ Error addLazyIRModule(ThreadSafeModule M) {
return addLazyIRModule(Main, std::move(M));
}
private:
+
+ // Create a single-threaded LLLazyJIT instance.
LLLazyJIT(std::unique_ptr<ExecutionSession> ES,
- std::unique_ptr<TargetMachine> TM, DataLayout DL, LLVMContext &Ctx,
- std::unique_ptr<JITCompileCallbackManager> CCMgr,
+ std::unique_ptr<TargetMachine> TM, DataLayout DL,
+ std::unique_ptr<LazyCallThroughManager> LCTMgr,
+ std::function<std::unique_ptr<IndirectStubsManager>()> ISMBuilder);
+
+ // Create a multi-threaded LLLazyJIT instance.
+ LLLazyJIT(std::unique_ptr<ExecutionSession> ES, JITTargetMachineBuilder JTMB,
+ DataLayout DL, unsigned NumCompileThreads,
+ std::unique_ptr<LazyCallThroughManager> LCTMgr,
std::function<std::unique_ptr<IndirectStubsManager>()> ISMBuilder);
- std::unique_ptr<JITCompileCallbackManager> CCMgr;
+ std::unique_ptr<LazyCallThroughManager> LCTMgr;
std::function<std::unique_ptr<IndirectStubsManager>()> ISMBuilder;
- IRTransformLayer2 TransformLayer;
- CompileOnDemandLayer2 CODLayer;
+ IRTransformLayer TransformLayer;
+ CompileOnDemandLayer CODLayer;
};
} // End namespace orc
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Layer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
index 91bd4fb83e6f..cd797445a2e6 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
@@ -15,7 +15,9 @@
#define LLVM_EXECUTIONENGINE_ORC_LAYER_H
#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
namespace orc {
@@ -29,14 +31,32 @@ public:
/// Returns the ExecutionSession for this layer.
ExecutionSession &getExecutionSession() { return ES; }
- /// Adds a MaterializationUnit representing the given IR to the given VSO.
- virtual Error add(VSO &V, VModuleKey K, std::unique_ptr<Module> M);
+ /// Sets the CloneToNewContextOnEmit flag (false by default).
+ ///
+ /// When set, IR modules added to this layer will be cloned on to a new
+ /// context before emit is called. This can be used by clients who want
+ /// to load all IR using one LLVMContext (to save memory via type and
+ /// constant uniquing), but want to move Modules to fresh contexts before
+ /// compiling them to enable concurrent compilation.
+ /// Single threaded clients, or clients who load every module on a new
+ /// context, need not set this.
+ void setCloneToNewContextOnEmit(bool CloneToNewContextOnEmit) {
+ this->CloneToNewContextOnEmit = CloneToNewContextOnEmit;
+ }
+
+ /// Returns the current value of the CloneToNewContextOnEmit flag.
+ bool getCloneToNewContextOnEmit() const { return CloneToNewContextOnEmit; }
+
+ /// Adds a MaterializationUnit representing the given IR to the given
+ /// JITDylib.
+ virtual Error add(JITDylib &JD, ThreadSafeModule TSM,
+ VModuleKey K = VModuleKey());
/// Emit should materialize the given IR.
- virtual void emit(MaterializationResponsibility R, VModuleKey K,
- std::unique_ptr<Module> M) = 0;
+ virtual void emit(MaterializationResponsibility R, ThreadSafeModule TSM) = 0;
private:
+ bool CloneToNewContextOnEmit = false;
ExecutionSession &ES;
};
@@ -50,22 +70,29 @@ public:
/// Create an IRMaterializationLayer. Scans the module to build the
/// SymbolFlags and SymbolToDefinition maps.
- IRMaterializationUnit(ExecutionSession &ES, std::unique_ptr<Module> M);
+ IRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
+ VModuleKey K);
/// Create an IRMaterializationLayer from a module, and pre-existing
/// SymbolFlags and SymbolToDefinition maps. The maps must provide
/// entries for each definition in M.
/// This constructor is useful for delegating work from one
/// IRMaterializationUnit to another.
- IRMaterializationUnit(std::unique_ptr<Module> M, SymbolFlagsMap SymbolFlags,
+ IRMaterializationUnit(ThreadSafeModule TSM, VModuleKey K,
+ SymbolFlagsMap SymbolFlags,
SymbolNameToDefinitionMap SymbolToDefinition);
+ /// Return the ModuleIdentifier as the name for this MaterializationUnit.
+ StringRef getName() const override;
+
+ const ThreadSafeModule &getModule() const { return TSM; }
+
protected:
- std::unique_ptr<Module> M;
+ ThreadSafeModule TSM;
SymbolNameToDefinitionMap SymbolToDefinition;
private:
- void discard(const VSO &V, SymbolStringPtr Name) override;
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
};
/// MaterializationUnit that materializes modules by calling the 'emit' method
@@ -73,7 +100,8 @@ private:
class BasicIRLayerMaterializationUnit : public IRMaterializationUnit {
public:
BasicIRLayerMaterializationUnit(IRLayer &L, VModuleKey K,
- std::unique_ptr<Module> M);
+ ThreadSafeModule TSM);
+
private:
void materialize(MaterializationResponsibility R) override;
@@ -91,11 +119,13 @@ public:
/// Returns the execution session for this layer.
ExecutionSession &getExecutionSession() { return ES; }
- /// Adds a MaterializationUnit representing the given IR to the given VSO.
- virtual Error add(VSO &V, VModuleKey K, std::unique_ptr<MemoryBuffer> O);
+ /// Adds a MaterializationUnit representing the given IR to the given
+ /// JITDylib.
+ virtual Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O,
+ VModuleKey K = VModuleKey());
/// Emit should materialize the given IR.
- virtual void emit(MaterializationResponsibility R, VModuleKey K,
+ virtual void emit(MaterializationResponsibility R,
std::unique_ptr<MemoryBuffer> O) = 0;
private:
@@ -106,23 +136,31 @@ private:
/// instance) by calling 'emit' on the given ObjectLayer.
class BasicObjectLayerMaterializationUnit : public MaterializationUnit {
public:
+ static Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
+ Create(ObjectLayer &L, VModuleKey K, std::unique_ptr<MemoryBuffer> O);
-
- /// The MemoryBuffer should represent a valid object file.
- /// If there is any chance that the file is invalid it should be validated
- /// prior to constructing a BasicObjectLayerMaterializationUnit.
BasicObjectLayerMaterializationUnit(ObjectLayer &L, VModuleKey K,
- std::unique_ptr<MemoryBuffer> O);
+ std::unique_ptr<MemoryBuffer> O,
+ SymbolFlagsMap SymbolFlags);
+
+ /// Return the buffer's identifier as the name for this MaterializationUnit.
+ StringRef getName() const override;
private:
+
void materialize(MaterializationResponsibility R) override;
- void discard(const VSO &V, SymbolStringPtr Name) override;
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
ObjectLayer &L;
- VModuleKey K;
std::unique_ptr<MemoryBuffer> O;
};
+/// Returns a SymbolFlagsMap for the object file represented by the given
+/// buffer, or an error if the buffer does not contain a valid object file.
+// FIXME: Maybe move to Core.h?
+Expected<SymbolFlagsMap> getObjectSymbolFlags(ExecutionSession &ES,
+ MemoryBufferRef ObjBuffer);
+
} // End namespace orc
} // End namespace llvm
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h
new file mode 100644
index 000000000000..b5041325bce2
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyReexports.h
@@ -0,0 +1,195 @@
+//===------ LazyReexports.h -- Utilities for lazy reexports -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lazy re-exports are similar to normal re-exports, except that for callable
+// symbols the definitions are replaced with trampolines that will look up and
+// call through to the re-exported symbol at runtime. This can be used to
+// enable lazy compilation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H
+#define LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+
+namespace llvm {
+
+class Triple;
+
+namespace orc {
+
+/// Manages a set of 'lazy call-through' trampolines. These are compiler
+/// re-entry trampolines that are pre-bound to look up a given symbol in a given
+/// JITDylib, then jump to that address. Since compilation of symbols is
+/// triggered on first lookup, these call-through trampolines can be used to
+/// implement lazy compilation.
+///
+/// The easiest way to construct these call-throughs is using the lazyReexport
+/// function.
+class LazyCallThroughManager {
+public:
+ /// Clients will want to take some action on first resolution, e.g. updating
+ /// a stub pointer. Instances of this class can be used to implement this.
+ class NotifyResolvedFunction {
+ public:
+ virtual ~NotifyResolvedFunction() {}
+
+ /// Called the first time a lazy call through is executed and the target
+ /// symbol resolved.
+ virtual Error operator()(JITDylib &SourceJD,
+ const SymbolStringPtr &SymbolName,
+ JITTargetAddress ResolvedAddr) = 0;
+
+ private:
+ virtual void anchor();
+ };
+
+ template <typename NotifyResolvedImpl>
+ class NotifyResolvedFunctionImpl : public NotifyResolvedFunction {
+ public:
+ NotifyResolvedFunctionImpl(NotifyResolvedImpl NotifyResolved)
+ : NotifyResolved(std::move(NotifyResolved)) {}
+ Error operator()(JITDylib &SourceJD, const SymbolStringPtr &SymbolName,
+ JITTargetAddress ResolvedAddr) {
+ return NotifyResolved(SourceJD, SymbolName, ResolvedAddr);
+ }
+
+ private:
+ NotifyResolvedImpl NotifyResolved;
+ };
+
+ /// Create a shared NotifyResolvedFunction from a given type that is
+ /// callable with the correct signature.
+ template <typename NotifyResolvedImpl>
+ static std::unique_ptr<NotifyResolvedFunction>
+ createNotifyResolvedFunction(NotifyResolvedImpl NotifyResolved) {
+ return llvm::make_unique<NotifyResolvedFunctionImpl<NotifyResolvedImpl>>(
+ std::move(NotifyResolved));
+ }
+
+ // Return a free call-through trampoline and bind it to look up and call
+ // through to the given symbol.
+ Expected<JITTargetAddress> getCallThroughTrampoline(
+ JITDylib &SourceJD, SymbolStringPtr SymbolName,
+ std::shared_ptr<NotifyResolvedFunction> NotifyResolved);
+
+protected:
+ LazyCallThroughManager(ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr,
+ std::unique_ptr<TrampolinePool> TP);
+
+ JITTargetAddress callThroughToSymbol(JITTargetAddress TrampolineAddr);
+
+ void setTrampolinePool(std::unique_ptr<TrampolinePool> TP) {
+ this->TP = std::move(TP);
+ }
+
+private:
+ using ReexportsMap =
+ std::map<JITTargetAddress, std::pair<JITDylib *, SymbolStringPtr>>;
+
+ using NotifiersMap =
+ std::map<JITTargetAddress, std::shared_ptr<NotifyResolvedFunction>>;
+
+ std::mutex LCTMMutex;
+ ExecutionSession &ES;
+ JITTargetAddress ErrorHandlerAddr;
+ std::unique_ptr<TrampolinePool> TP;
+ ReexportsMap Reexports;
+ NotifiersMap Notifiers;
+};
+
+/// A lazy call-through manager that builds trampolines in the current process.
+class LocalLazyCallThroughManager : public LazyCallThroughManager {
+private:
+ LocalLazyCallThroughManager(ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr)
+ : LazyCallThroughManager(ES, ErrorHandlerAddr, nullptr) {}
+
+ template <typename ORCABI> Error init() {
+ auto TP = LocalTrampolinePool<ORCABI>::Create(
+ [this](JITTargetAddress TrampolineAddr) {
+ return callThroughToSymbol(TrampolineAddr);
+ });
+
+ if (!TP)
+ return TP.takeError();
+
+ setTrampolinePool(std::move(*TP));
+ return Error::success();
+ }
+
+public:
+ /// Create a LocalLazyCallThroughManager using the given ABI. See
+ /// createLocalLazyCallThroughManager.
+ template <typename ORCABI>
+ static Expected<std::unique_ptr<LocalLazyCallThroughManager>>
+ Create(ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr) {
+ auto LLCTM = std::unique_ptr<LocalLazyCallThroughManager>(
+ new LocalLazyCallThroughManager(ES, ErrorHandlerAddr));
+
+ if (auto Err = LLCTM->init<ORCABI>())
+ return std::move(Err);
+
+ return std::move(LLCTM);
+ }
+};
+
+/// Create a LocalLazyCallThroughManager from the given triple and execution
+/// session.
+Expected<std::unique_ptr<LazyCallThroughManager>>
+createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr);
+
+/// A materialization unit that builds lazy re-exports. These are callable
+/// entry points that call through to the given symbols.
+/// Unlike a 'true' re-export, the address of the lazy re-export will not
+/// match the address of the re-exported symbol, but calling it will behave
+/// the same as calling the re-exported symbol.
+class LazyReexportsMaterializationUnit : public MaterializationUnit {
+public:
+ LazyReexportsMaterializationUnit(LazyCallThroughManager &LCTManager,
+ IndirectStubsManager &ISManager,
+ JITDylib &SourceJD,
+ SymbolAliasMap CallableAliases,
+ VModuleKey K);
+
+ StringRef getName() const override;
+
+private:
+ void materialize(MaterializationResponsibility R) override;
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
+ static SymbolFlagsMap extractFlags(const SymbolAliasMap &Aliases);
+
+ LazyCallThroughManager &LCTManager;
+ IndirectStubsManager &ISManager;
+ JITDylib &SourceJD;
+ SymbolAliasMap CallableAliases;
+ std::shared_ptr<LazyCallThroughManager::NotifyResolvedFunction>
+ NotifyResolved;
+};
+
+/// Define lazy-reexports based on the given SymbolAliasMap. Each lazy re-export
+/// is a callable symbol that will look up and dispatch to the given aliasee on
+/// first call. All subsequent calls will go directly to the aliasee.
+inline std::unique_ptr<LazyReexportsMaterializationUnit>
+lazyReexports(LazyCallThroughManager &LCTManager,
+ IndirectStubsManager &ISManager, JITDylib &SourceJD,
+ SymbolAliasMap CallableAliases, VModuleKey K = VModuleKey()) {
+ return llvm::make_unique<LazyReexportsMaterializationUnit>(
+ LCTManager, ISManager, SourceJD, std::move(CallableAliases),
+ std::move(K));
+}
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Legacy.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Legacy.h
index 52c8c162ff0b..4c6162ac4b8b 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/Legacy.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/Legacy.h
@@ -31,12 +31,12 @@ class SymbolResolver {
public:
virtual ~SymbolResolver() = default;
- /// Returns the flags for each symbol in Symbols that can be found,
- /// along with the set of symbol that could not be found.
- virtual SymbolFlagsMap lookupFlags(const SymbolNameSet &Symbols) = 0;
+ /// Returns the subset of the given symbols that the caller is responsible for
+ /// materializing.
+ virtual SymbolNameSet getResponsibilitySet(const SymbolNameSet &Symbols) = 0;
/// For each symbol in Symbols that can be found, assigns that symbols
- /// value in Query. Returns the set of symbols that could not be found.
+ /// value in Query. Returns the set of symbols that could not be found.
virtual SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
SymbolNameSet Symbols) = 0;
@@ -46,16 +46,18 @@ private:
/// Implements SymbolResolver with a pair of supplied function objects
/// for convenience. See createSymbolResolver.
-template <typename LookupFlagsFn, typename LookupFn>
+template <typename GetResponsibilitySetFn, typename LookupFn>
class LambdaSymbolResolver final : public SymbolResolver {
public:
- template <typename LookupFlagsFnRef, typename LookupFnRef>
- LambdaSymbolResolver(LookupFlagsFnRef &&LookupFlags, LookupFnRef &&Lookup)
- : LookupFlags(std::forward<LookupFlagsFnRef>(LookupFlags)),
+ template <typename GetResponsibilitySetFnRef, typename LookupFnRef>
+ LambdaSymbolResolver(GetResponsibilitySetFnRef &&GetResponsibilitySet,
+ LookupFnRef &&Lookup)
+ : GetResponsibilitySet(
+ std::forward<GetResponsibilitySetFnRef>(GetResponsibilitySet)),
Lookup(std::forward<LookupFnRef>(Lookup)) {}
- SymbolFlagsMap lookupFlags(const SymbolNameSet &Symbols) final {
- return LookupFlags(Symbols);
+ SymbolNameSet getResponsibilitySet(const SymbolNameSet &Symbols) final {
+ return GetResponsibilitySet(Symbols);
}
SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
@@ -64,34 +66,37 @@ public:
}
private:
- LookupFlagsFn LookupFlags;
+ GetResponsibilitySetFn GetResponsibilitySet;
LookupFn Lookup;
};
/// Creates a SymbolResolver implementation from the pair of supplied
/// function objects.
-template <typename LookupFlagsFn, typename LookupFn>
+template <typename GetResponsibilitySetFn, typename LookupFn>
std::unique_ptr<LambdaSymbolResolver<
typename std::remove_cv<
- typename std::remove_reference<LookupFlagsFn>::type>::type,
+ typename std::remove_reference<GetResponsibilitySetFn>::type>::type,
typename std::remove_cv<
typename std::remove_reference<LookupFn>::type>::type>>
-createSymbolResolver(LookupFlagsFn &&LookupFlags, LookupFn &&Lookup) {
+createSymbolResolver(GetResponsibilitySetFn &&GetResponsibilitySet,
+ LookupFn &&Lookup) {
using LambdaSymbolResolverImpl = LambdaSymbolResolver<
typename std::remove_cv<
- typename std::remove_reference<LookupFlagsFn>::type>::type,
+ typename std::remove_reference<GetResponsibilitySetFn>::type>::type,
typename std::remove_cv<
typename std::remove_reference<LookupFn>::type>::type>;
return llvm::make_unique<LambdaSymbolResolverImpl>(
- std::forward<LookupFlagsFn>(LookupFlags), std::forward<LookupFn>(Lookup));
+ std::forward<GetResponsibilitySetFn>(GetResponsibilitySet),
+ std::forward<LookupFn>(Lookup));
}
+/// Legacy adapter. Remove once we kill off the old ORC layers.
class JITSymbolResolverAdapter : public JITSymbolResolver {
public:
JITSymbolResolverAdapter(ExecutionSession &ES, SymbolResolver &R,
MaterializationResponsibility *MR);
- Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) override;
- Expected<LookupResult> lookup(const LookupSet &Symbols) override;
+ Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) override;
+ void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) override;
private:
ExecutionSession &ES;
@@ -100,27 +105,29 @@ private:
MaterializationResponsibility *MR;
};
-/// Use the given legacy-style FindSymbol function (i.e. a function that
-/// takes a const std::string& or StringRef and returns a JITSymbol) to
-/// find the flags for each symbol in Symbols and store their flags in
-/// SymbolFlags. If any JITSymbol returned by FindSymbol is in an error
-/// state the function returns immediately with that error, otherwise it
-/// returns the set of symbols not found.
+/// Use the given legacy-style FindSymbol function (i.e. a function that takes
+/// a const std::string& or StringRef and returns a JITSymbol) to get the
+/// subset of symbols that the caller is responsible for materializing. If any
+/// JITSymbol returned by FindSymbol is in an error state the function returns
+/// immediately with that error.
///
-/// Useful for implementing lookupFlags bodies that query legacy resolvers.
+/// Useful for implementing getResponsibilitySet bodies that query legacy
+/// resolvers.
template <typename FindSymbolFn>
-Expected<SymbolFlagsMap> lookupFlagsWithLegacyFn(const SymbolNameSet &Symbols,
- FindSymbolFn FindSymbol) {
- SymbolFlagsMap SymbolFlags;
+Expected<SymbolNameSet>
+getResponsibilitySetWithLegacyFn(const SymbolNameSet &Symbols,
+ FindSymbolFn FindSymbol) {
+ SymbolNameSet Result;
for (auto &S : Symbols) {
- if (JITSymbol Sym = FindSymbol(*S))
- SymbolFlags[S] = Sym.getFlags();
- else if (auto Err = Sym.takeError())
+ if (JITSymbol Sym = FindSymbol(*S)) {
+ if (!Sym.getFlags().isStrong())
+ Result.insert(S);
+ } else if (auto Err = Sym.takeError())
return std::move(Err);
}
- return SymbolFlags;
+ return Result;
}
/// Use the given legacy-style FindSymbol function (i.e. a function that
@@ -177,12 +184,13 @@ public:
: ES(ES), LegacyLookup(std::move(LegacyLookup)),
ReportError(std::move(ReportError)) {}
- SymbolFlagsMap lookupFlags(const SymbolNameSet &Symbols) final {
- if (auto SymbolFlags = lookupFlagsWithLegacyFn(Symbols, LegacyLookup))
- return std::move(*SymbolFlags);
+ SymbolNameSet getResponsibilitySet(const SymbolNameSet &Symbols) final {
+ if (auto ResponsibilitySet =
+ getResponsibilitySetWithLegacyFn(Symbols, LegacyLookup))
+ return std::move(*ResponsibilitySet);
else {
- ReportError(SymbolFlags.takeError());
- return SymbolFlagsMap();
+ ReportError(ResponsibilitySet.takeError());
+ return SymbolNameSet();
}
}
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h
index 3dd3cfe05b8d..03fefb69a928 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h
@@ -23,10 +23,10 @@ namespace orc {
class NullResolver : public SymbolResolver {
public:
- SymbolFlagsMap lookupFlags(const SymbolNameSet &Symbols) override;
+ SymbolNameSet getResponsibilitySet(const SymbolNameSet &Symbols) final;
SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
- SymbolNameSet Symbols) override;
+ SymbolNameSet Symbols) final;
};
/// SymbolResolver impliementation that rejects all resolution requests.
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
index c6b43a9c8ed6..44d6b490e19d 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -23,16 +23,16 @@
namespace llvm {
namespace orc {
-class ObjectTransformLayer2 : public ObjectLayer {
+class ObjectTransformLayer : public ObjectLayer {
public:
using TransformFunction =
std::function<Expected<std::unique_ptr<MemoryBuffer>>(
std::unique_ptr<MemoryBuffer>)>;
- ObjectTransformLayer2(ExecutionSession &ES, ObjectLayer &BaseLayer,
- TransformFunction Transform);
+ ObjectTransformLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ TransformFunction Transform);
- void emit(MaterializationResponsibility R, VModuleKey K,
+ void emit(MaterializationResponsibility R,
std::unique_ptr<MemoryBuffer> O) override;
private:
@@ -46,11 +46,11 @@ private:
/// immediately applies the user supplied functor to each object, then adds
/// the set of transformed objects to the layer below.
template <typename BaseLayerT, typename TransformFtor>
-class ObjectTransformLayer {
+class LegacyObjectTransformLayer {
public:
/// Construct an ObjectTransformLayer with the given BaseLayer
- ObjectTransformLayer(BaseLayerT &BaseLayer,
- TransformFtor Transform = TransformFtor())
+ LegacyObjectTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
: BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
/// Apply the transform functor to each object in the object set, then
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
index 581c598aff62..a70fc373713d 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -238,7 +238,78 @@ public:
unsigned MinStubs, void *InitialPtrVal);
};
-} // end namespace orc
-} // end namespace llvm
+// @brief Mips32 support.
+//
+// Mips32 supports lazy JITing.
+class OrcMips32_Base {
+public:
+ static const unsigned PointerSize = 4;
+ static const unsigned TrampolineSize = 20;
+ static const unsigned ResolverCodeSize = 0xfc;
+ using IndirectStubsInfo = GenericIndirectStubsInfo<16>;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+ /// @brief Write the requsted number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,unsigned NumTrampolines);
+
+ /// @brief Write the resolver code into the given memory. The user is be
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,void *CallbackMgr, bool isBigEndian);
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+ /// E.g. Asking for 4 stubs on Mips32, where stubs are 8-bytes, with 4k
+ /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+ /// will return a block of 1024 (2-pages worth).
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,unsigned MinStubs, void *InitialPtrVal);
+};
+
+
+class OrcMips32Le : public OrcMips32_Base {
+public:
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,void *CallbackMgr)
+ { OrcMips32_Base::writeResolverCode(ResolveMem, Reentry, CallbackMgr, false); }
+};
+
+class OrcMips32Be : public OrcMips32_Base {
+public:
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,void *CallbackMgr)
+ { OrcMips32_Base::writeResolverCode(ResolveMem, Reentry, CallbackMgr, true); }
+};
+
+// @brief Mips64 support.
+//
+// Mips64 supports lazy JITing.
+class OrcMips64 {
+public:
+ static const unsigned PointerSize = 8;
+ static const unsigned TrampolineSize = 40;
+ static const unsigned ResolverCodeSize = 0x120;
+
+ using IndirectStubsInfo = GenericIndirectStubsInfo<32>;
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+ /// @brief Write the resolver code into the given memory. The user is be
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,void *CallbackMgr);
+
+ /// @brief Write the requsted number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,unsigned NumTrampolines);
+
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+ /// E.g. Asking for 4 stubs on Mips64, where stubs are 8-bytes, with 4k
+ /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+ /// will return a block of 1024 (2-pages worth).
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,unsigned MinStubs, void *InitialPtrVal);
+};
+ } // end namespace orc
+ } // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
index 45f95f63e70f..3e07f5cf3742 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -118,30 +118,33 @@ public:
Unmapped.back().RemoteCodeAddr =
Client.reserveMem(Id, CodeSize, CodeAlign);
- LLVM_DEBUG(dbgs() << " code: "
- << format("0x%016x", Unmapped.back().RemoteCodeAddr)
- << " (" << CodeSize << " bytes, alignment "
- << CodeAlign << ")\n");
+ LLVM_DEBUG(
+ dbgs() << " code: "
+ << format("0x%016" PRIx64, Unmapped.back().RemoteCodeAddr)
+ << " (" << CodeSize << " bytes, alignment " << CodeAlign
+ << ")\n");
}
if (RODataSize != 0) {
Unmapped.back().RemoteRODataAddr =
Client.reserveMem(Id, RODataSize, RODataAlign);
- LLVM_DEBUG(dbgs() << " ro-data: "
- << format("0x%016x", Unmapped.back().RemoteRODataAddr)
- << " (" << RODataSize << " bytes, alignment "
- << RODataAlign << ")\n");
+ LLVM_DEBUG(
+ dbgs() << " ro-data: "
+ << format("0x%016" PRIx64, Unmapped.back().RemoteRODataAddr)
+ << " (" << RODataSize << " bytes, alignment " << RODataAlign
+ << ")\n");
}
if (RWDataSize != 0) {
Unmapped.back().RemoteRWDataAddr =
Client.reserveMem(Id, RWDataSize, RWDataAlign);
- LLVM_DEBUG(dbgs() << " rw-data: "
- << format("0x%016x", Unmapped.back().RemoteRWDataAddr)
- << " (" << RWDataSize << " bytes, alignment "
- << RWDataAlign << ")\n");
+ LLVM_DEBUG(
+ dbgs() << " rw-data: "
+ << format("0x%016" PRIx64, Unmapped.back().RemoteRWDataAddr)
+ << " (" << RWDataSize << " bytes, alignment " << RWDataAlign
+ << ")\n");
}
}
@@ -269,9 +272,9 @@ public:
for (auto &Alloc : Allocs) {
NextAddr = alignTo(NextAddr, Alloc.getAlign());
Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextAddr);
- LLVM_DEBUG(dbgs() << " "
- << static_cast<void *>(Alloc.getLocalAddress())
- << " -> " << format("0x%016x", NextAddr) << "\n");
+ LLVM_DEBUG(
+ dbgs() << " " << static_cast<void *>(Alloc.getLocalAddress())
+ << " -> " << format("0x%016" PRIx64, NextAddr) << "\n");
Alloc.setRemoteAddress(NextAddr);
// Only advance NextAddr if it was non-null to begin with,
@@ -293,7 +296,7 @@ public:
LLVM_DEBUG(dbgs() << " copying section: "
<< static_cast<void *>(Alloc.getLocalAddress())
<< " -> "
- << format("0x%016x", Alloc.getRemoteAddress())
+ << format("0x%016" PRIx64, Alloc.getRemoteAddress())
<< " (" << Alloc.getSize() << " bytes)\n";);
if (Client.writeMem(Alloc.getRemoteAddress(), Alloc.getLocalAddress(),
@@ -306,7 +309,8 @@ public:
<< (Permissions & sys::Memory::MF_WRITE ? 'W' : '-')
<< (Permissions & sys::Memory::MF_EXEC ? 'X' : '-')
<< " permissions on block: "
- << format("0x%016x", RemoteSegmentAddr) << "\n");
+ << format("0x%016" PRIx64, RemoteSegmentAddr)
+ << "\n");
if (Client.setProtections(Id, RemoteSegmentAddr, Permissions))
return true;
}
@@ -446,16 +450,24 @@ public:
StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
};
- /// Remote compile callback manager.
- class RemoteCompileCallbackManager : public JITCompileCallbackManager {
+ class RemoteTrampolinePool : public TrampolinePool {
public:
- RemoteCompileCallbackManager(OrcRemoteTargetClient &Client,
- ExecutionSession &ES,
- JITTargetAddress ErrorHandlerAddress)
- : JITCompileCallbackManager(ES, ErrorHandlerAddress), Client(Client) {}
+ RemoteTrampolinePool(OrcRemoteTargetClient &Client) : Client(Client) {}
+
+ Expected<JITTargetAddress> getTrampoline() override {
+ std::lock_guard<std::mutex> Lock(RTPMutex);
+ if (AvailableTrampolines.empty()) {
+ if (auto Err = grow())
+ return std::move(Err);
+ }
+ assert(!AvailableTrampolines.empty() && "Failed to grow trampoline pool");
+ auto TrampolineAddr = AvailableTrampolines.back();
+ AvailableTrampolines.pop_back();
+ return TrampolineAddr;
+ }
private:
- Error grow() override {
+ Error grow() {
JITTargetAddress BlockAddr = 0;
uint32_t NumTrampolines = 0;
if (auto TrampolineInfoOrErr = Client.emitTrampolineBlock())
@@ -470,7 +482,20 @@ public:
return Error::success();
}
+ std::mutex RTPMutex;
OrcRemoteTargetClient &Client;
+ std::vector<JITTargetAddress> AvailableTrampolines;
+ };
+
+ /// Remote compile callback manager.
+ class RemoteCompileCallbackManager : public JITCompileCallbackManager {
+ public:
+ RemoteCompileCallbackManager(OrcRemoteTargetClient &Client,
+ ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress)
+ : JITCompileCallbackManager(
+ llvm::make_unique<RemoteTrampolinePool>(Client), ES,
+ ErrorHandlerAddress) {}
};
/// Create an OrcRemoteTargetClient.
@@ -489,8 +514,8 @@ public:
/// Call the int(void) function at the given address in the target and return
/// its result.
Expected<int> callIntVoid(JITTargetAddress Addr) {
- LLVM_DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Calling int(*)(void) "
+ << format("0x%016" PRIx64, Addr) << "\n");
return callB<exec::CallIntVoid>(Addr);
}
@@ -499,15 +524,15 @@ public:
Expected<int> callMain(JITTargetAddress Addr,
const std::vector<std::string> &Args) {
LLVM_DEBUG(dbgs() << "Calling int(*)(int, char*[]) "
- << format("0x%016x", Addr) << "\n");
+ << format("0x%016" PRIx64, Addr) << "\n");
return callB<exec::CallMain>(Addr, Args);
}
/// Call the void() function at the given address in the target and wait for
/// it to finish.
Error callVoidVoid(JITTargetAddress Addr) {
- LLVM_DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Calling void(*)(void) "
+ << format("0x%016" PRIx64, Addr) << "\n");
return callB<exec::CallVoidVoid>(Addr);
}
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
index bc0da0f9a730..8db9e317a18a 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
@@ -87,8 +87,7 @@ class SerializationTraits<ChannelT, JITSymbolFlags> {
public:
static Error serialize(ChannelT &C, const JITSymbolFlags &Flags) {
- return serializeSeq(C, static_cast<JITSymbolFlags::UnderlyingType>(Flags),
- Flags.getTargetFlags());
+ return serializeSeq(C, Flags.getRawFlagsValue(), Flags.getTargetFlags());
}
static Error deserialize(ChannelT &C, JITSymbolFlags &Flags) {
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCUtils.h
index 47bd90bb1bad..953b73e10e43 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCUtils.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCUtils.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
#include <future>
@@ -207,73 +208,6 @@ private:
namespace detail {
-// FIXME: Remove MSVCPError/MSVCPExpected once MSVC's future implementation
-// supports classes without default constructors.
-#ifdef _MSC_VER
-
-namespace msvc_hacks {
-
-// Work around MSVC's future implementation's use of default constructors:
-// A default constructed value in the promise will be overwritten when the
-// real error is set - so the default constructed Error has to be checked
-// already.
-class MSVCPError : public Error {
-public:
- MSVCPError() { (void)!!*this; }
-
- MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}
-
- MSVCPError &operator=(MSVCPError Other) {
- Error::operator=(std::move(Other));
- return *this;
- }
-
- MSVCPError(Error Err) : Error(std::move(Err)) {}
-};
-
-// Work around MSVC's future implementation, similar to MSVCPError.
-template <typename T> class MSVCPExpected : public Expected<T> {
-public:
- MSVCPExpected()
- : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
- consumeError(this->takeError());
- }
-
- MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}
-
- MSVCPExpected &operator=(MSVCPExpected &&Other) {
- Expected<T>::operator=(std::move(Other));
- return *this;
- }
-
- MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}
-
- template <typename OtherT>
- MSVCPExpected(
- OtherT &&Val,
- typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
- nullptr)
- : Expected<T>(std::move(Val)) {}
-
- template <class OtherT>
- MSVCPExpected(
- Expected<OtherT> &&Other,
- typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
- nullptr)
- : Expected<T>(std::move(Other)) {}
-
- template <class OtherT>
- explicit MSVCPExpected(
- Expected<OtherT> &&Other,
- typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
- nullptr)
- : Expected<T>(std::move(Other)) {}
-};
-
-} // end namespace msvc_hacks
-
-#endif // _MSC_VER
-
/// Provides a typedef for a tuple containing the decayed argument types.
template <typename T> class FunctionArgsTuple;
@@ -293,10 +227,10 @@ public:
#ifdef _MSC_VER
// The ErrorReturnType wrapped in a std::promise.
- using ReturnPromiseType = std::promise<msvc_hacks::MSVCPExpected<RetT>>;
+ using ReturnPromiseType = std::promise<MSVCPExpected<RetT>>;
// The ErrorReturnType wrapped in a std::future.
- using ReturnFutureType = std::future<msvc_hacks::MSVCPExpected<RetT>>;
+ using ReturnFutureType = std::future<MSVCPExpected<RetT>>;
#else
// The ErrorReturnType wrapped in a std::promise.
using ReturnPromiseType = std::promise<ErrorReturnType>;
@@ -325,10 +259,10 @@ public:
#ifdef _MSC_VER
// The ErrorReturnType wrapped in a std::promise.
- using ReturnPromiseType = std::promise<msvc_hacks::MSVCPError>;
+ using ReturnPromiseType = std::promise<MSVCPError>;
// The ErrorReturnType wrapped in a std::future.
- using ReturnFutureType = std::future<msvc_hacks::MSVCPError>;
+ using ReturnFutureType = std::future<MSVCPError>;
#else
// The ErrorReturnType wrapped in a std::promise.
using ReturnPromiseType = std::promise<ErrorReturnType>;
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 48b3f7a58ed7..6f90f0380d95 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -36,7 +36,7 @@
namespace llvm {
namespace orc {
-class RTDyldObjectLinkingLayer2 : public ObjectLayer {
+class RTDyldObjectLinkingLayer : public ObjectLayer {
public:
/// Functor for receiving object-loaded notifications.
using NotifyLoadedFunction =
@@ -44,48 +44,84 @@ public:
const RuntimeDyld::LoadedObjectInfo &)>;
/// Functor for receiving finalization notifications.
- using NotifyFinalizedFunction = std::function<void(VModuleKey)>;
+ using NotifyEmittedFunction = std::function<void(VModuleKey)>;
using GetMemoryManagerFunction =
- std::function<std::shared_ptr<RuntimeDyld::MemoryManager>(VModuleKey)>;
+ std::function<std::unique_ptr<RuntimeDyld::MemoryManager>()>;
/// Construct an ObjectLinkingLayer with the given NotifyLoaded,
- /// and NotifyFinalized functors.
- RTDyldObjectLinkingLayer2(
+ /// and NotifyEmitted functors.
+ RTDyldObjectLinkingLayer(
ExecutionSession &ES, GetMemoryManagerFunction GetMemoryManager,
NotifyLoadedFunction NotifyLoaded = NotifyLoadedFunction(),
- NotifyFinalizedFunction NotifyFinalized = NotifyFinalizedFunction());
+ NotifyEmittedFunction NotifyEmitted = NotifyEmittedFunction());
/// Emit the object.
- void emit(MaterializationResponsibility R, VModuleKey K,
+ void emit(MaterializationResponsibility R,
std::unique_ptr<MemoryBuffer> O) override;
- /// Map section addresses for the object associated with the
- /// VModuleKey K.
- void mapSectionAddress(VModuleKey K, const void *LocalAddress,
- JITTargetAddress TargetAddr) const;
-
/// Set the 'ProcessAllSections' flag.
///
/// If set to true, all sections in each object file will be allocated using
/// the memory manager, rather than just the sections required for execution.
///
/// This is kludgy, and may be removed in the future.
- void setProcessAllSections(bool ProcessAllSections) {
+ RTDyldObjectLinkingLayer &setProcessAllSections(bool ProcessAllSections) {
this->ProcessAllSections = ProcessAllSections;
+ return *this;
+ }
+
+ /// Instructs this RTDyldLinkingLayer2 instance to override the symbol flags
+ /// returned by RuntimeDyld for any given object file with the flags supplied
+ /// by the MaterializationResponsibility instance. This is a workaround to
+ /// support symbol visibility in COFF, which does not use the libObject's
+ /// SF_Exported flag. Use only when generating / adding COFF object files.
+ ///
+ /// FIXME: We should be able to remove this if/when COFF properly tracks
+ /// exported symbols.
+ RTDyldObjectLinkingLayer &
+ setOverrideObjectFlagsWithResponsibilityFlags(bool OverrideObjectFlags) {
+ this->OverrideObjectFlags = OverrideObjectFlags;
+ return *this;
+ }
+
+ /// If set, this RTDyldObjectLinkingLayer instance will claim responsibility
+ /// for any symbols provided by a given object file that were not already in
+ /// the MaterializationResponsibility instance. Setting this flag allows
+ /// higher-level program representations (e.g. LLVM IR) to be added based on
+ /// only a subset of the symbols they provide, without having to write
+ /// intervening layers to scan and add the additional symbols. This trades
+ /// diagnostic quality for convenience however: If all symbols are enumerated
+ /// up-front then clashes can be detected and reported early (and usually
+ /// deterministically). If this option is set, clashes for the additional
+ /// symbols may not be detected until late, and detection may depend on
+ /// the flow of control through JIT'd code. Use with care.
+ RTDyldObjectLinkingLayer &
+ setAutoClaimResponsibilityForObjectSymbols(bool AutoClaimObjectSymbols) {
+ this->AutoClaimObjectSymbols = AutoClaimObjectSymbols;
+ return *this;
}
private:
+ Error onObjLoad(VModuleKey K, MaterializationResponsibility &R,
+ object::ObjectFile &Obj,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> Resolved,
+ std::set<StringRef> &InternalSymbols);
+
+ void onObjEmit(VModuleKey K, MaterializationResponsibility &R, Error Err);
+
mutable std::mutex RTDyldLayerMutex;
GetMemoryManagerFunction GetMemoryManager;
NotifyLoadedFunction NotifyLoaded;
- NotifyFinalizedFunction NotifyFinalized;
- bool ProcessAllSections;
- std::map<VModuleKey, RuntimeDyld *> ActiveRTDylds;
- std::map<VModuleKey, std::shared_ptr<RuntimeDyld::MemoryManager>> MemMgrs;
+ NotifyEmittedFunction NotifyEmitted;
+ bool ProcessAllSections = false;
+ bool OverrideObjectFlags = false;
+ bool AutoClaimObjectSymbols = false;
+ std::vector<std::unique_ptr<RuntimeDyld::MemoryManager>> MemMgrs;
};
-class RTDyldObjectLinkingLayerBase {
+class LegacyRTDyldObjectLinkingLayerBase {
public:
using ObjectPtr = std::unique_ptr<MemoryBuffer>;
@@ -137,10 +173,10 @@ protected:
/// object files to be loaded into memory, linked, and the addresses of their
/// symbols queried. All objects added to this layer can see each other's
/// symbols.
-class RTDyldObjectLinkingLayer : public RTDyldObjectLinkingLayerBase {
+class LegacyRTDyldObjectLinkingLayer : public LegacyRTDyldObjectLinkingLayerBase {
public:
- using RTDyldObjectLinkingLayerBase::ObjectPtr;
+ using LegacyRTDyldObjectLinkingLayerBase::ObjectPtr;
/// Functor for receiving object-loaded notifications.
using NotifyLoadedFtor =
@@ -161,7 +197,7 @@ private:
template <typename MemoryManagerPtrT>
class ConcreteLinkedObject : public LinkedObject {
public:
- ConcreteLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+ ConcreteLinkedObject(LegacyRTDyldObjectLinkingLayer &Parent, VModuleKey K,
OwnedObject Obj, MemoryManagerPtrT MemMgr,
std::shared_ptr<SymbolResolver> Resolver,
bool ProcessAllSections)
@@ -175,7 +211,7 @@ private:
}
~ConcreteLinkedObject() override {
- if (this->Parent.NotifyFreed)
+ if (this->Parent.NotifyFreed && ObjForNotify.getBinary())
this->Parent.NotifyFreed(K, *ObjForNotify.getBinary());
MemMgr->deregisterEHFrames();
@@ -249,9 +285,14 @@ private:
consumeError(SymbolName.takeError());
continue;
}
+ // FIXME: Raise an error for bad symbols.
auto Flags = JITSymbolFlags::fromObjectSymbol(Symbol);
+ if (!Flags) {
+ consumeError(Flags.takeError());
+ continue;
+ }
SymbolTable.insert(
- std::make_pair(*SymbolName, JITEvaluatedSymbol(0, Flags)));
+ std::make_pair(*SymbolName, JITEvaluatedSymbol(0, *Flags)));
}
}
@@ -272,7 +313,7 @@ private:
};
VModuleKey K;
- RTDyldObjectLinkingLayer &Parent;
+ LegacyRTDyldObjectLinkingLayer &Parent;
MemoryManagerPtrT MemMgr;
OwnedObject ObjForNotify;
std::unique_ptr<PreFinalizeContents> PFC;
@@ -280,7 +321,7 @@ private:
template <typename MemoryManagerPtrT>
std::unique_ptr<ConcreteLinkedObject<MemoryManagerPtrT>>
- createLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+ createLinkedObject(LegacyRTDyldObjectLinkingLayer &Parent, VModuleKey K,
OwnedObject Obj, MemoryManagerPtrT MemMgr,
std::shared_ptr<SymbolResolver> Resolver,
bool ProcessAllSections) {
@@ -300,7 +341,7 @@ public:
/// Construct an ObjectLinkingLayer with the given NotifyLoaded,
/// and NotifyFinalized functors.
- RTDyldObjectLinkingLayer(
+ LegacyRTDyldObjectLinkingLayer(
ExecutionSession &ES, ResourcesGetter GetResources,
NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor(),
@@ -402,11 +443,14 @@ public:
private:
ExecutionSession &ES;
- std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
ResourcesGetter GetResources;
NotifyLoadedFtor NotifyLoaded;
NotifyFinalizedFtor NotifyFinalized;
NotifyFreedFtor NotifyFreed;
+
+ // NB! `LinkedObjects` needs to be destroyed before `NotifyFreed` because
+ // `~ConcreteLinkedObject` calls `NotifyFreed`
+ std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
bool ProcessAllSections = false;
};
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
index 4c45cfd199dd..717076e25609 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
@@ -14,6 +14,7 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
#define LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include <atomic>
#include <mutex>
@@ -49,10 +50,13 @@ private:
/// Pointer to a pooled string representing a symbol name.
class SymbolStringPtr {
friend class SymbolStringPool;
+ friend struct DenseMapInfo<SymbolStringPtr>;
friend bool operator==(const SymbolStringPtr &LHS,
const SymbolStringPtr &RHS);
friend bool operator<(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS);
+ static SymbolStringPool::PoolMapEntry Tombstone;
+
public:
SymbolStringPtr() = default;
SymbolStringPtr(const SymbolStringPtr &Other)
@@ -142,6 +146,29 @@ inline bool SymbolStringPool::empty() const {
}
} // end namespace orc
+
+template <>
+struct DenseMapInfo<orc::SymbolStringPtr> {
+
+ static orc::SymbolStringPtr getEmptyKey() {
+ return orc::SymbolStringPtr();
+ }
+
+ static orc::SymbolStringPtr getTombstoneKey() {
+ return orc::SymbolStringPtr(&orc::SymbolStringPtr::Tombstone);
+ }
+
+ static unsigned getHashValue(orc::SymbolStringPtr V) {
+ uintptr_t IV = reinterpret_cast<uintptr_t>(V.S);
+ return unsigned(IV) ^ unsigned(IV >> 9);
+ }
+
+ static bool isEqual(const orc::SymbolStringPtr &LHS,
+ const orc::SymbolStringPtr &RHS) {
+ return LHS.S == RHS.S;
+ }
+};
+
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h
new file mode 100644
index 000000000000..bf946de532d3
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ThreadSafeModule.h
@@ -0,0 +1,163 @@
+//===----------- ThreadSafeModule.h -- Layer interfaces ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Thread safe wrappers and utilities for Module and LLVMContext.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULEWRAPPER_H
+#define LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULEWRAPPER_H
+
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Compiler.h"
+
+#include <functional>
+#include <memory>
+#include <mutex>
+
+namespace llvm {
+namespace orc {
+
+/// An LLVMContext together with an associated mutex that can be used to lock
+/// the context to prevent concurrent access by other threads.
+class ThreadSafeContext {
+private:
+ struct State {
+ State(std::unique_ptr<LLVMContext> Ctx) : Ctx(std::move(Ctx)) {}
+
+ std::unique_ptr<LLVMContext> Ctx;
+ std::recursive_mutex Mutex;
+ };
+
+public:
+ // RAII based lock for ThreadSafeContext.
+ class LLVM_NODISCARD Lock {
+ private:
+ using UnderlyingLock = std::lock_guard<std::recursive_mutex>;
+
+ public:
+ Lock(std::shared_ptr<State> S)
+ : S(std::move(S)),
+ L(llvm::make_unique<UnderlyingLock>(this->S->Mutex)) {}
+
+ private:
+ std::shared_ptr<State> S;
+ std::unique_ptr<UnderlyingLock> L;
+ };
+
+ /// Construct a null context.
+ ThreadSafeContext() = default;
+
+ /// Construct a ThreadSafeContext from the given LLVMContext.
+ ThreadSafeContext(std::unique_ptr<LLVMContext> NewCtx)
+ : S(std::make_shared<State>(std::move(NewCtx))) {
+ assert(S->Ctx != nullptr &&
+ "Can not construct a ThreadSafeContext from a nullptr");
+ }
+
+ /// Returns a pointer to the LLVMContext that was used to construct this
+ /// instance, or null if the instance was default constructed.
+ LLVMContext *getContext() { return S ? S->Ctx.get() : nullptr; }
+
+ /// Returns a pointer to the LLVMContext that was used to construct this
+ /// instance, or null if the instance was default constructed.
+ const LLVMContext *getContext() const { return S ? S->Ctx.get() : nullptr; }
+
+ Lock getLock() {
+ assert(S && "Can not lock an empty ThreadSafeContext");
+ return Lock(S);
+ }
+
+private:
+ std::shared_ptr<State> S;
+};
+
+/// An LLVM Module together with a shared ThreadSafeContext.
+class ThreadSafeModule {
+public:
+ /// Default construct a ThreadSafeModule. This results in a null module and
+ /// null context.
+ ThreadSafeModule() = default;
+
+ ThreadSafeModule(ThreadSafeModule &&Other) = default;
+
+ ThreadSafeModule &operator=(ThreadSafeModule &&Other) {
+ // We have to explicitly define this move operator to copy the fields in
+ // reverse order (i.e. module first) to ensure the dependencies are
+ // protected: The old module that is being overwritten must be destroyed
+ // *before* the context that it depends on.
+ // We also need to lock the context to make sure the module tear-down
+ // does not overlap any other work on the context.
+ if (M) {
+ auto L = getContextLock();
+ M = nullptr;
+ }
+ M = std::move(Other.M);
+ TSCtx = std::move(Other.TSCtx);
+ return *this;
+ }
+
+ /// Construct a ThreadSafeModule from a unique_ptr<Module> and a
+ /// unique_ptr<LLVMContext>. This creates a new ThreadSafeContext from the
+ /// given context.
+ ThreadSafeModule(std::unique_ptr<Module> M, std::unique_ptr<LLVMContext> Ctx)
+ : M(std::move(M)), TSCtx(std::move(Ctx)) {}
+
+ /// Construct a ThreadSafeModule from a unique_ptr<Module> and an
+ /// existing ThreadSafeContext.
+ ThreadSafeModule(std::unique_ptr<Module> M, ThreadSafeContext TSCtx)
+ : M(std::move(M)), TSCtx(std::move(TSCtx)) {}
+
+ ~ThreadSafeModule() {
+ // We need to lock the context while we destruct the module.
+ if (M) {
+ auto L = getContextLock();
+ M = nullptr;
+ }
+ }
+
+ /// Get the module wrapped by this ThreadSafeModule.
+ Module *getModule() { return M.get(); }
+
+ /// Get the module wrapped by this ThreadSafeModule.
+ const Module *getModule() const { return M.get(); }
+
+ /// Take out a lock on the ThreadSafeContext for this module.
+ ThreadSafeContext::Lock getContextLock() { return TSCtx.getLock(); }
+
+ /// Boolean conversion: This ThreadSafeModule will evaluate to true if it
+ /// wraps a non-null module.
+ explicit operator bool() {
+ if (M) {
+ assert(TSCtx.getContext() &&
+ "Non-null module must have non-null context");
+ return true;
+ }
+ return false;
+ }
+
+private:
+ std::unique_ptr<Module> M;
+ ThreadSafeContext TSCtx;
+};
+
+using GVPredicate = std::function<bool(const GlobalValue &)>;
+using GVModifier = std::function<void(GlobalValue &)>;
+
+/// Clones the given module on to a new context.
+ThreadSafeModule
+cloneToNewContext(ThreadSafeModule &TSMW,
+ GVPredicate ShouldCloneDef = GVPredicate(),
+ GVModifier UpdateClonedDefSource = GVModifier());
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULEWRAPPER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
index 5dd5add1bb39..e419ee05e566 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -250,6 +250,16 @@ public:
void finalizeWithMemoryManagerLocking();
private:
+ friend void
+ jitLinkForORC(object::ObjectFile &Obj,
+ std::unique_ptr<MemoryBuffer> UnderlyingBuffer,
+ RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
+ bool ProcessAllSections,
+ std::function<Error(std::unique_ptr<LoadedObjectInfo>,
+ std::map<StringRef, JITEvaluatedSymbol>)>
+ OnLoaded,
+ std::function<void(Error)> OnEmitted);
+
// RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
// interface.
std::unique_ptr<RuntimeDyldImpl> Dyld;
@@ -259,6 +269,21 @@ private:
RuntimeDyldCheckerImpl *Checker;
};
+// Asynchronous JIT link for ORC.
+//
+// Warning: This API is experimental and probably should not be used by anyone
+// but ORC's RTDyldObjectLinkingLayer2. Internally it constructs a RuntimeDyld
+// instance and uses continuation passing to perform the fix-up and finalize
+// steps asynchronously.
+void jitLinkForORC(object::ObjectFile &Obj,
+ std::unique_ptr<MemoryBuffer> UnderlyingBuffer,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ std::function<Error(std::unique_ptr<LoadedObjectInfo>,
+ std::map<StringRef, JITEvaluatedSymbol>)>
+ OnLoaded,
+ std::function<void(Error)> OnEmitted);
+
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
diff --git a/contrib/llvm/include/llvm/IR/Attributes.h b/contrib/llvm/include/llvm/IR/Attributes.h
index 5aaaaf3c396b..9fc4614af010 100644
--- a/contrib/llvm/include/llvm/IR/Attributes.h
+++ b/contrib/llvm/include/llvm/IR/Attributes.h
@@ -230,29 +230,33 @@ public:
/// Add an argument attribute. Returns a new set because attribute sets are
/// immutable.
- AttributeSet addAttribute(LLVMContext &C, Attribute::AttrKind Kind) const;
+ LLVM_NODISCARD AttributeSet addAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const;
/// Add a target-dependent attribute. Returns a new set because attribute sets
/// are immutable.
- AttributeSet addAttribute(LLVMContext &C, StringRef Kind,
- StringRef Value = StringRef()) const;
+ LLVM_NODISCARD AttributeSet addAttribute(LLVMContext &C, StringRef Kind,
+ StringRef Value = StringRef()) const;
/// Add attributes to the attribute set. Returns a new set because attribute
/// sets are immutable.
- AttributeSet addAttributes(LLVMContext &C, AttributeSet AS) const;
+ LLVM_NODISCARD AttributeSet addAttributes(LLVMContext &C,
+ AttributeSet AS) const;
/// Remove the specified attribute from this set. Returns a new set because
/// attribute sets are immutable.
- AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const;
+ LLVM_NODISCARD AttributeSet removeAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const;
/// Remove the specified attribute from this set. Returns a new set because
/// attribute sets are immutable.
- AttributeSet removeAttribute(LLVMContext &C, StringRef Kind) const;
+ LLVM_NODISCARD AttributeSet removeAttribute(LLVMContext &C,
+ StringRef Kind) const;
/// Remove the specified attributes from this set. Returns a new set because
/// attribute sets are immutable.
- AttributeSet removeAttributes(LLVMContext &C,
- const AttrBuilder &AttrsToRemove) const;
+ LLVM_NODISCARD AttributeSet
+ removeAttributes(LLVMContext &C, const AttrBuilder &AttrsToRemove) const;
/// Return the number of attributes in this set.
unsigned getNumAttributes() const;
@@ -375,133 +379,140 @@ public:
/// Add an attribute to the attribute set at the given index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAttribute(LLVMContext &C, unsigned Index,
- Attribute::AttrKind Kind) const;
+ LLVM_NODISCARD AttributeList addAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const;
/// Add an attribute to the attribute set at the given index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAttribute(LLVMContext &C, unsigned Index, StringRef Kind,
- StringRef Value = StringRef()) const;
+ LLVM_NODISCARD AttributeList
+ addAttribute(LLVMContext &C, unsigned Index, StringRef Kind,
+ StringRef Value = StringRef()) const;
/// Add an attribute to the attribute set at the given index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAttribute(LLVMContext &C, unsigned Index, Attribute A) const;
+ LLVM_NODISCARD AttributeList addAttribute(LLVMContext &C, unsigned Index,
+ Attribute A) const;
/// Add attributes to the attribute set at the given index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAttributes(LLVMContext &C, unsigned Index,
- const AttrBuilder &B) const;
+ LLVM_NODISCARD AttributeList addAttributes(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) const;
/// Add an argument attribute to the list. Returns a new list because
/// attribute lists are immutable.
- AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo,
- Attribute::AttrKind Kind) const {
+ LLVM_NODISCARD AttributeList addParamAttribute(
+ LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const {
return addAttribute(C, ArgNo + FirstArgIndex, Kind);
}
/// Add an argument attribute to the list. Returns a new list because
/// attribute lists are immutable.
- AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo,
- StringRef Kind,
- StringRef Value = StringRef()) const {
+ LLVM_NODISCARD AttributeList
+ addParamAttribute(LLVMContext &C, unsigned ArgNo, StringRef Kind,
+ StringRef Value = StringRef()) const {
return addAttribute(C, ArgNo + FirstArgIndex, Kind, Value);
}
/// Add an attribute to the attribute list at the given arg indices. Returns a
/// new list because attribute lists are immutable.
- AttributeList addParamAttribute(LLVMContext &C, ArrayRef<unsigned> ArgNos,
- Attribute A) const;
+ LLVM_NODISCARD AttributeList addParamAttribute(LLVMContext &C,
+ ArrayRef<unsigned> ArgNos,
+ Attribute A) const;
/// Add an argument attribute to the list. Returns a new list because
/// attribute lists are immutable.
- AttributeList addParamAttributes(LLVMContext &C, unsigned ArgNo,
- const AttrBuilder &B) const {
+ LLVM_NODISCARD AttributeList addParamAttributes(LLVMContext &C,
+ unsigned ArgNo,
+ const AttrBuilder &B) const {
return addAttributes(C, ArgNo + FirstArgIndex, B);
}
/// Remove the specified attribute at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeAttribute(LLVMContext &C, unsigned Index,
- Attribute::AttrKind Kind) const;
+ LLVM_NODISCARD AttributeList removeAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const;
/// Remove the specified attribute at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeAttribute(LLVMContext &C, unsigned Index,
- StringRef Kind) const;
+ LLVM_NODISCARD AttributeList removeAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind) const;
/// Remove the specified attributes at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeAttributes(LLVMContext &C, unsigned Index,
- const AttrBuilder &AttrsToRemove) const;
+ LLVM_NODISCARD AttributeList removeAttributes(
+ LLVMContext &C, unsigned Index, const AttrBuilder &AttrsToRemove) const;
/// Remove all attributes at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeAttributes(LLVMContext &C, unsigned Index) const;
+ LLVM_NODISCARD AttributeList removeAttributes(LLVMContext &C,
+ unsigned Index) const;
/// Remove the specified attribute at the specified arg index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeParamAttribute(LLVMContext &C, unsigned ArgNo,
- Attribute::AttrKind Kind) const {
+ LLVM_NODISCARD AttributeList removeParamAttribute(
+ LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const {
return removeAttribute(C, ArgNo + FirstArgIndex, Kind);
}
/// Remove the specified attribute at the specified arg index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeParamAttribute(LLVMContext &C, unsigned ArgNo,
- StringRef Kind) const {
+ LLVM_NODISCARD AttributeList removeParamAttribute(LLVMContext &C,
+ unsigned ArgNo,
+ StringRef Kind) const {
return removeAttribute(C, ArgNo + FirstArgIndex, Kind);
}
/// Remove the specified attribute at the specified arg index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo,
- const AttrBuilder &AttrsToRemove) const {
+ LLVM_NODISCARD AttributeList removeParamAttributes(
+ LLVMContext &C, unsigned ArgNo, const AttrBuilder &AttrsToRemove) const {
return removeAttributes(C, ArgNo + FirstArgIndex, AttrsToRemove);
}
/// Remove all attributes at the specified arg index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo) const {
+ LLVM_NODISCARD AttributeList removeParamAttributes(LLVMContext &C,
+ unsigned ArgNo) const {
return removeAttributes(C, ArgNo + FirstArgIndex);
}
/// \brief Add the dereferenceable attribute to the attribute set at the given
/// index. Returns a new list because attribute lists are immutable.
- AttributeList addDereferenceableAttr(LLVMContext &C, unsigned Index,
- uint64_t Bytes) const;
+ LLVM_NODISCARD AttributeList addDereferenceableAttr(LLVMContext &C,
+ unsigned Index,
+ uint64_t Bytes) const;
/// \brief Add the dereferenceable attribute to the attribute set at the given
/// arg index. Returns a new list because attribute lists are immutable.
- AttributeList addDereferenceableParamAttr(LLVMContext &C, unsigned ArgNo,
- uint64_t Bytes) const {
+ LLVM_NODISCARD AttributeList addDereferenceableParamAttr(
+ LLVMContext &C, unsigned ArgNo, uint64_t Bytes) const {
return addDereferenceableAttr(C, ArgNo + FirstArgIndex, Bytes);
}
/// Add the dereferenceable_or_null attribute to the attribute set at
/// the given index. Returns a new list because attribute lists are immutable.
- AttributeList addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index,
- uint64_t Bytes) const;
+ LLVM_NODISCARD AttributeList addDereferenceableOrNullAttr(
+ LLVMContext &C, unsigned Index, uint64_t Bytes) const;
/// Add the dereferenceable_or_null attribute to the attribute set at
/// the given arg index. Returns a new list because attribute lists are
/// immutable.
- AttributeList addDereferenceableOrNullParamAttr(LLVMContext &C,
- unsigned ArgNo,
- uint64_t Bytes) const {
+ LLVM_NODISCARD AttributeList addDereferenceableOrNullParamAttr(
+ LLVMContext &C, unsigned ArgNo, uint64_t Bytes) const {
return addDereferenceableOrNullAttr(C, ArgNo + FirstArgIndex, Bytes);
}
/// Add the allocsize attribute to the attribute set at the given index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAllocSizeAttr(LLVMContext &C, unsigned Index,
- unsigned ElemSizeArg,
- const Optional<unsigned> &NumElemsArg);
+ LLVM_NODISCARD AttributeList
+ addAllocSizeAttr(LLVMContext &C, unsigned Index, unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg);
/// Add the allocsize attribute to the attribute set at the given arg index.
/// Returns a new list because attribute lists are immutable.
- AttributeList addAllocSizeParamAttr(LLVMContext &C, unsigned ArgNo,
- unsigned ElemSizeArg,
- const Optional<unsigned> &NumElemsArg) {
+ LLVM_NODISCARD AttributeList
+ addAllocSizeParamAttr(LLVMContext &C, unsigned ArgNo, unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg) {
return addAllocSizeAttr(C, ArgNo + FirstArgIndex, ElemSizeArg, NumElemsArg);
}
diff --git a/contrib/llvm/include/llvm/IR/Attributes.td b/contrib/llvm/include/llvm/IR/Attributes.td
index 39978c41ac72..e786d85d05a8 100644
--- a/contrib/llvm/include/llvm/IR/Attributes.td
+++ b/contrib/llvm/include/llvm/IR/Attributes.td
@@ -176,6 +176,14 @@ def SanitizeMemory : EnumAttr<"sanitize_memory">;
/// HWAddressSanitizer is on.
def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress">;
+/// Speculative Load Hardening is enabled.
+///
+/// Note that this uses the default compatibility (always compatible during
+/// inlining) and a conservative merge strategy where inlining an attributed
+/// body will add the attribute to the caller. This ensures that code carrying
+/// this attribute will always be lowered with hardening enabled.
+def SpeculativeLoadHardening : EnumAttr<"speculative_load_hardening">;
+
/// Argument is swift error.
def SwiftError : EnumAttr<"swifterror">;
@@ -232,6 +240,7 @@ def : MergeRule<"setAND<UnsafeFPMathAttr>">;
def : MergeRule<"setOR<NoImplicitFloatAttr>">;
def : MergeRule<"setOR<NoJumpTablesAttr>">;
def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
+def : MergeRule<"setOR<SpeculativeLoadHardeningAttr>">;
def : MergeRule<"adjustCallerSSPLevel">;
def : MergeRule<"adjustCallerStackProbes">;
def : MergeRule<"adjustCallerStackProbeSize">;
diff --git a/contrib/llvm/include/llvm/IR/BasicBlock.h b/contrib/llvm/include/llvm/IR/BasicBlock.h
index 1ee19975af75..99eac33f742e 100644
--- a/contrib/llvm/include/llvm/IR/BasicBlock.h
+++ b/contrib/llvm/include/llvm/IR/BasicBlock.h
@@ -38,7 +38,6 @@ class LandingPadInst;
class LLVMContext;
class Module;
class PHINode;
-class TerminatorInst;
class ValueSymbolTable;
/// LLVM Basic Block Representation
@@ -50,12 +49,12 @@ class ValueSymbolTable;
/// represents a label to which a branch can jump.
///
/// A well formed basic block is formed of a list of non-terminating
-/// instructions followed by a single TerminatorInst instruction.
-/// TerminatorInst's may not occur in the middle of basic blocks, and must
-/// terminate the blocks. The BasicBlock class allows malformed basic blocks to
-/// occur because it may be useful in the intermediate stage of constructing or
-/// modifying a program. However, the verifier will ensure that basic blocks
-/// are "well formed".
+/// instructions followed by a single terminator instruction. Terminator
+/// instructions may not occur in the middle of basic blocks, and must terminate
+/// the blocks. The BasicBlock class allows malformed basic blocks to occur
+/// because it may be useful in the intermediate stage of constructing or
+/// modifying a program. However, the verifier will ensure that basic blocks are
+/// "well formed".
class BasicBlock final : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
@@ -120,10 +119,10 @@ public:
/// Returns the terminator instruction if the block is well formed or null
/// if the block is not well formed.
- const TerminatorInst *getTerminator() const LLVM_READONLY;
- TerminatorInst *getTerminator() {
- return const_cast<TerminatorInst *>(
- static_cast<const BasicBlock *>(this)->getTerminator());
+ const Instruction *getTerminator() const LLVM_READONLY;
+ Instruction *getTerminator() {
+ return const_cast<Instruction *>(
+ static_cast<const BasicBlock *>(this)->getTerminator());
}
/// Returns the call instruction calling \@llvm.experimental.deoptimize
@@ -238,6 +237,12 @@ public:
static_cast<const BasicBlock *>(this)->getUniquePredecessor());
}
+ /// Return true if this block has exactly N predecessors.
+ bool hasNPredecessors(unsigned N) const;
+
+ /// Return true if this block has N predecessors or more.
+ bool hasNPredecessorsOrMore(unsigned N) const;
+
/// Return the successor of this block if it has a single successor.
/// Otherwise return a null pointer.
///
diff --git a/contrib/llvm/include/llvm/IR/CFG.h b/contrib/llvm/include/llvm/IR/CFG.h
index f4988e7f1fec..8385c4647e12 100644
--- a/contrib/llvm/include/llvm/IR/CFG.h
+++ b/contrib/llvm/include/llvm/IR/CFG.h
@@ -1,4 +1,4 @@
-//===- CFG.h - Process LLVM structures as graphs ----------------*- C++ -*-===//
+//===- CFG.h ----------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,10 +6,15 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines specializations of GraphTraits that allow Function and
-// BasicBlock graphs to be treated as proper graphs for generic algorithms.
-//
+/// \file
+///
+/// This file provides various utilities for inspecting and working with the
+/// control flow graph in LLVM IR. This includes generic facilities for
+/// iterating successors and predecessors of basic blocks, the successors of
+/// specific terminator instructions, etc. It also defines specializations of
+/// GraphTraits that allow Function and BasicBlock graphs to be treated as
+/// proper graphs for generic algorithms.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CFG_H
@@ -44,8 +49,13 @@ class PredIterator : public std::iterator<std::forward_iterator_tag,
inline void advancePastNonTerminators() {
// Loop to ignore non-terminator uses (for example BlockAddresses).
- while (!It.atEnd() && !isa<TerminatorInst>(*It))
+ while (!It.atEnd()) {
+ if (auto *Inst = dyn_cast<Instruction>(*It))
+ if (Inst->isTerminator())
+ break;
+
++It;
+ }
}
public:
@@ -63,7 +73,7 @@ public:
inline reference operator*() const {
assert(!It.atEnd() && "pred_iterator out of range!");
- return cast<TerminatorInst>(*It)->getParent();
+ return cast<Instruction>(*It)->getParent();
}
inline pointer *operator->() const { return &operator*(); }
@@ -107,6 +117,8 @@ inline const_pred_iterator pred_end(const BasicBlock *BB) {
inline bool pred_empty(const BasicBlock *BB) {
return pred_begin(BB) == pred_end(BB);
}
+/// Get the number of predecessors of \p BB. This is a linear time operation.
+/// Use \ref BasicBlock::hasNPredecessors() or hasNPredecessorsOrMore if able.
inline unsigned pred_size(const BasicBlock *BB) {
return std::distance(pred_begin(BB), pred_end(BB));
}
@@ -118,16 +130,144 @@ inline pred_const_range predecessors(const BasicBlock *BB) {
}
//===----------------------------------------------------------------------===//
-// BasicBlock succ_iterator helpers
+// Instruction and BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//
-using succ_iterator =
- TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>;
-using succ_const_iterator =
- TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>;
+template <class InstructionT, class BlockT>
+class SuccIterator
+ : public iterator_facade_base<SuccIterator<InstructionT, BlockT>,
+ std::random_access_iterator_tag, BlockT, int,
+ BlockT *, BlockT *> {
+public:
+ using difference_type = int;
+ using pointer = BlockT *;
+ using reference = BlockT *;
+
+private:
+ InstructionT *Inst;
+ int Idx;
+ using Self = SuccIterator<InstructionT, BlockT>;
+
+ inline bool index_is_valid(int Idx) {
+ // Note that we specially support the index of zero being valid even in the
+ // face of a null instruction.
+ return Idx >= 0 && (Idx == 0 || Idx <= (int)Inst->getNumSuccessors());
+ }
+
+ /// Proxy object to allow write access in operator[]
+ class SuccessorProxy {
+ Self It;
+
+ public:
+ explicit SuccessorProxy(const Self &It) : It(It) {}
+
+ SuccessorProxy(const SuccessorProxy &) = default;
+
+ SuccessorProxy &operator=(SuccessorProxy RHS) {
+ *this = reference(RHS);
+ return *this;
+ }
+
+ SuccessorProxy &operator=(reference RHS) {
+ It.Inst->setSuccessor(It.Idx, RHS);
+ return *this;
+ }
+
+ operator reference() const { return *It; }
+ };
+
+public:
+ // begin iterator
+ explicit inline SuccIterator(InstructionT *Inst) : Inst(Inst), Idx(0) {}
+ // end iterator
+ inline SuccIterator(InstructionT *Inst, bool) : Inst(Inst) {
+ if (Inst)
+ Idx = Inst->getNumSuccessors();
+ else
+ // Inst == NULL happens, if a basic block is not fully constructed and
+ // consequently getTerminator() returns NULL. In this case we construct
+ // a SuccIterator which describes a basic block that has zero
+ // successors.
+ // Defining SuccIterator for incomplete and malformed CFGs is especially
+ // useful for debugging.
+ Idx = 0;
+ }
+
+ /// This is used to interface between code that wants to
+ /// operate on terminator instructions directly.
+ int getSuccessorIndex() const { return Idx; }
+
+ inline bool operator==(const Self &x) const { return Idx == x.Idx; }
+
+ inline BlockT *operator*() const { return Inst->getSuccessor(Idx); }
+
+ // We use the basic block pointer directly for operator->.
+ inline BlockT *operator->() const { return operator*(); }
+
+ inline bool operator<(const Self &RHS) const {
+ assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
+ return Idx < RHS.Idx;
+ }
+
+ int operator-(const Self &RHS) const {
+ assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
+ return Idx - RHS.Idx;
+ }
+
+ inline Self &operator+=(int RHS) {
+ int NewIdx = Idx + RHS;
+ assert(index_is_valid(NewIdx) && "Iterator index out of bound");
+ Idx = NewIdx;
+ return *this;
+ }
+
+ inline Self &operator-=(int RHS) { return operator+=(-RHS); }
+
+ // Specially implement the [] operation using a proxy object to support
+ // assignment.
+ inline SuccessorProxy operator[](int Offset) {
+ Self TmpIt = *this;
+ TmpIt += Offset;
+ return SuccessorProxy(TmpIt);
+ }
+
+ /// Get the source BlockT of this iterator.
+ inline BlockT *getSource() {
+ assert(Inst && "Source not available, if basic block was malformed");
+ return Inst->getParent();
+ }
+};
+
+template <typename T, typename U> struct isPodLike<SuccIterator<T, U>> {
+ static const bool value = isPodLike<T>::value;
+};
+
+using succ_iterator = SuccIterator<Instruction, BasicBlock>;
+using succ_const_iterator = SuccIterator<const Instruction, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
using succ_const_range = iterator_range<succ_const_iterator>;
+inline succ_iterator succ_begin(Instruction *I) { return succ_iterator(I); }
+inline succ_const_iterator succ_begin(const Instruction *I) {
+ return succ_const_iterator(I);
+}
+inline succ_iterator succ_end(Instruction *I) { return succ_iterator(I, true); }
+inline succ_const_iterator succ_end(const Instruction *I) {
+ return succ_const_iterator(I, true);
+}
+inline bool succ_empty(const Instruction *I) {
+ return succ_begin(I) == succ_end(I);
+}
+inline unsigned succ_size(const Instruction *I) {
+ return std::distance(succ_begin(I), succ_end(I));
+}
+inline succ_range successors(Instruction *I) {
+ return succ_range(succ_begin(I), succ_end(I));
+}
+inline succ_const_range successors(const Instruction *I) {
+ return succ_const_range(succ_begin(I), succ_end(I));
+}
+
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
}
@@ -153,11 +293,6 @@ inline succ_const_range successors(const BasicBlock *BB) {
return succ_const_range(succ_begin(BB), succ_end(BB));
}
-template <typename T, typename U>
-struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
- static const bool value = isPodLike<T>::value;
-};
-
//===--------------------------------------------------------------------===//
// GraphTraits specializations for basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/IR/CFGDiff.h b/contrib/llvm/include/llvm/IR/CFGDiff.h
new file mode 100644
index 000000000000..da4373f7bce2
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/CFGDiff.h
@@ -0,0 +1,285 @@
+//===- CFGDiff.h - Define a CFG snapshot. -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines specializations of GraphTraits that allow generic
+// algorithms to see a different snapshot of a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CFGDIFF_H
+#define LLVM_IR_CFGDIFF_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/Support/CFGUpdate.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+// Two booleans are used to define orders in graphs:
+// InverseGraph defines when we need to reverse the whole graph and is as such
+// also equivalent to applying updates in reverse.
+// InverseEdge defines whether we want to change the edges direction. E.g., for
+// a non-inversed graph, the children are naturally the successors when
+// InverseEdge is false and the predecessors when InverseEdge is true.
+
+// We define two base classes that call into GraphDiff, one for successors
+// (CFGSuccessors), where InverseEdge is false, and one for predecessors
+// (CFGPredecessors), where InverseEdge is true.
+// FIXME: Further refactoring may merge the two base classes into a single one
+// templated / parametrized on using succ_iterator/pred_iterator and false/true
+// for the InverseEdge.
+
+// CFGViewSuccessors and CFGViewPredecessors, both can be parametrized to
+// consider the graph inverted or not (i.e. InverseGraph). Successors
+// implicitly has InverseEdge = false and Predecessors implicitly has
+// InverseEdge = true (see calls to GraphDiff methods in there). The GraphTraits
+// instantiations that follow define the value of InverseGraph.
+
+// GraphTraits instantiations:
+// - GraphDiff<BasicBlock *> is equivalent to InverseGraph = false
+// - GraphDiff<Inverse<BasicBlock *>> is equivalent to InverseGraph = true
+// - second pair item is BasicBlock *, then InverseEdge = false (so it inherits
+// from CFGViewSuccessors).
+// - second pair item is Inverse<BasicBlock *>, then InverseEdge = true (so it
+// inherits from CFGViewPredecessors).
+
+// The 4 GraphTraits are as follows:
+// 1. std::pair<const GraphDiff<BasicBlock *> *, BasicBlock *>> :
+// CFGViewSuccessors<false>
+// Regular CFG, children means successors, InverseGraph = false,
+// InverseEdge = false.
+// 2. std::pair<const GraphDiff<Inverse<BasicBlock *>> *, BasicBlock *>> :
+// CFGViewSuccessors<true>
+// Reverse the graph, get successors but reverse-apply updates,
+// InverseGraph = true, InverseEdge = false.
+// 3. std::pair<const GraphDiff<BasicBlock *> *, Inverse<BasicBlock *>>> :
+// CFGViewPredecessors<false>
+// Regular CFG, reverse edges, so children mean predecessors,
+// InverseGraph = false, InverseEdge = true.
+// 4. std::pair<const GraphDiff<Inverse<BasicBlock *>> *, Inverse<BasicBlock *>>
+// : CFGViewPredecessors<true>
+// Reverse the graph and the edges, InverseGraph = true, InverseEdge = true.
+
+namespace llvm {
+
+// GraphDiff defines a CFG snapshot: given a set of Update<NodePtr>, provide
+// utilities to skip edges marked as deleted and return a set of edges marked as
+// newly inserted. The current diff treats the CFG as a graph rather than a
+// multigraph. Added edges are pruned to be unique, and deleted edges will
+// remove all existing edges between two blocks.
+template <typename NodePtr, bool InverseGraph = false> class GraphDiff {
+ using UpdateMapType = SmallDenseMap<NodePtr, SmallVector<NodePtr, 2>>;
+ UpdateMapType SuccInsert;
+ UpdateMapType SuccDelete;
+ UpdateMapType PredInsert;
+ UpdateMapType PredDelete;
+ // Using a singleton empty vector for all BasicBlock requests with no
+ // children.
+ SmallVector<NodePtr, 1> Empty;
+
+ void printMap(raw_ostream &OS, const UpdateMapType &M) const {
+ for (auto Pair : M)
+ for (auto Child : Pair.second) {
+ OS << "(";
+ Pair.first->printAsOperand(OS, false);
+ OS << ", ";
+ Child->printAsOperand(OS, false);
+ OS << ") ";
+ }
+ OS << "\n";
+ }
+
+public:
+ GraphDiff() {}
+ GraphDiff(ArrayRef<cfg::Update<NodePtr>> Updates) {
+ SmallVector<cfg::Update<NodePtr>, 4> LegalizedUpdates;
+ cfg::LegalizeUpdates<NodePtr>(Updates, LegalizedUpdates, InverseGraph);
+ for (auto U : LegalizedUpdates) {
+ if (U.getKind() == cfg::UpdateKind::Insert) {
+ SuccInsert[U.getFrom()].push_back(U.getTo());
+ PredInsert[U.getTo()].push_back(U.getFrom());
+ } else {
+ SuccDelete[U.getFrom()].push_back(U.getTo());
+ PredDelete[U.getTo()].push_back(U.getFrom());
+ }
+ }
+ }
+
+ bool ignoreChild(const NodePtr BB, NodePtr EdgeEnd, bool InverseEdge) const {
+ auto &DeleteChildren =
+ (InverseEdge != InverseGraph) ? PredDelete : SuccDelete;
+ auto It = DeleteChildren.find(BB);
+ if (It == DeleteChildren.end())
+ return false;
+ auto &EdgesForBB = It->second;
+ return llvm::find(EdgesForBB, EdgeEnd) != EdgesForBB.end();
+ }
+
+ iterator_range<typename SmallVectorImpl<NodePtr>::const_iterator>
+ getAddedChildren(const NodePtr BB, bool InverseEdge) const {
+ auto &InsertChildren =
+ (InverseEdge != InverseGraph) ? PredInsert : SuccInsert;
+ auto It = InsertChildren.find(BB);
+ if (It == InsertChildren.end())
+ return make_range(Empty.begin(), Empty.end());
+ return make_range(It->second.begin(), It->second.end());
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << "===== GraphDiff: CFG edge changes to create a CFG snapshot. \n"
+ "===== (Note: notion of children/inverse_children depends on "
+ "the direction of edges and the graph.)\n";
+ OS << "Children to insert:\n\t";
+ printMap(OS, SuccInsert);
+ OS << "Children to delete:\n\t";
+ printMap(OS, SuccDelete);
+ OS << "Inverse_children to insert:\n\t";
+ printMap(OS, PredInsert);
+ OS << "Inverse_children to delete:\n\t";
+ printMap(OS, PredDelete);
+ OS << "\n";
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
+#endif
+};
+
+template <bool InverseGraph = false> struct CFGViewSuccessors {
+ using DataRef = const GraphDiff<BasicBlock *, InverseGraph> *;
+ using NodeRef = std::pair<DataRef, BasicBlock *>;
+
+ using ExistingChildIterator =
+ WrappedPairNodeDataIterator<succ_iterator, NodeRef, DataRef>;
+ struct DeletedEdgesFilter {
+ BasicBlock *BB;
+ DeletedEdgesFilter(BasicBlock *BB) : BB(BB){};
+ bool operator()(NodeRef N) const {
+ return !N.first->ignoreChild(BB, N.second, false);
+ }
+ };
+ using FilterExistingChildrenIterator =
+ filter_iterator<ExistingChildIterator, DeletedEdgesFilter>;
+
+ using vec_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;
+ using AddNewChildrenIterator =
+ WrappedPairNodeDataIterator<vec_iterator, NodeRef, DataRef>;
+ using ChildIteratorType =
+ concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>;
+
+ static ChildIteratorType child_begin(NodeRef N) {
+ auto InsertVec = N.first->getAddedChildren(N.second, false);
+ // filter iterator init:
+ auto firstit = make_filter_range(
+ make_range<ExistingChildIterator>({succ_begin(N.second), N.first},
+ {succ_end(N.second), N.first}),
+ DeletedEdgesFilter(N.second));
+ // new inserts iterator init:
+ auto secondit = make_range<AddNewChildrenIterator>(
+ {InsertVec.begin(), N.first}, {InsertVec.end(), N.first});
+
+ return concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>(firstit, secondit);
+ }
+
+ static ChildIteratorType child_end(NodeRef N) {
+ auto InsertVec = N.first->getAddedChildren(N.second, false);
+ // filter iterator init:
+ auto firstit = make_filter_range(
+ make_range<ExistingChildIterator>({succ_end(N.second), N.first},
+ {succ_end(N.second), N.first}),
+ DeletedEdgesFilter(N.second));
+ // new inserts iterator init:
+ auto secondit = make_range<AddNewChildrenIterator>(
+ {InsertVec.end(), N.first}, {InsertVec.end(), N.first});
+
+ return concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>(firstit, secondit);
+ }
+};
+
+template <bool InverseGraph = false> struct CFGViewPredecessors {
+ using DataRef = const GraphDiff<BasicBlock *, InverseGraph> *;
+ using NodeRef = std::pair<DataRef, BasicBlock *>;
+
+ using ExistingChildIterator =
+ WrappedPairNodeDataIterator<pred_iterator, NodeRef, DataRef>;
+ struct DeletedEdgesFilter {
+ BasicBlock *BB;
+ DeletedEdgesFilter(BasicBlock *BB) : BB(BB){};
+ bool operator()(NodeRef N) const {
+ return !N.first->ignoreChild(BB, N.second, true);
+ }
+ };
+ using FilterExistingChildrenIterator =
+ filter_iterator<ExistingChildIterator, DeletedEdgesFilter>;
+
+ using vec_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;
+ using AddNewChildrenIterator =
+ WrappedPairNodeDataIterator<vec_iterator, NodeRef, DataRef>;
+ using ChildIteratorType =
+ concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>;
+
+ static ChildIteratorType child_begin(NodeRef N) {
+ auto InsertVec = N.first->getAddedChildren(N.second, true);
+ // filter iterator init:
+ auto firstit = make_filter_range(
+ make_range<ExistingChildIterator>({pred_begin(N.second), N.first},
+ {pred_end(N.second), N.first}),
+ DeletedEdgesFilter(N.second));
+ // new inserts iterator init:
+ auto secondit = make_range<AddNewChildrenIterator>(
+ {InsertVec.begin(), N.first}, {InsertVec.end(), N.first});
+
+ return concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>(firstit, secondit);
+ }
+
+ static ChildIteratorType child_end(NodeRef N) {
+ auto InsertVec = N.first->getAddedChildren(N.second, true);
+ // filter iterator init:
+ auto firstit = make_filter_range(
+ make_range<ExistingChildIterator>({pred_end(N.second), N.first},
+ {pred_end(N.second), N.first}),
+ DeletedEdgesFilter(N.second));
+ // new inserts iterator init:
+ auto secondit = make_range<AddNewChildrenIterator>(
+ {InsertVec.end(), N.first}, {InsertVec.end(), N.first});
+
+ return concat_iterator<NodeRef, FilterExistingChildrenIterator,
+ AddNewChildrenIterator>(firstit, secondit);
+ }
+};
+
+template <>
+struct GraphTraits<
+ std::pair<const GraphDiff<BasicBlock *, false> *, BasicBlock *>>
+ : CFGViewSuccessors<false> {};
+template <>
+struct GraphTraits<
+ std::pair<const GraphDiff<BasicBlock *, true> *, BasicBlock *>>
+ : CFGViewSuccessors<true> {};
+template <>
+struct GraphTraits<
+ std::pair<const GraphDiff<BasicBlock *, false> *, Inverse<BasicBlock *>>>
+ : CFGViewPredecessors<false> {};
+template <>
+struct GraphTraits<
+ std::pair<const GraphDiff<BasicBlock *, true> *, Inverse<BasicBlock *>>>
+ : CFGViewPredecessors<true> {};
+} // end namespace llvm
+
+#endif // LLVM_IR_CFGDIFF_H
diff --git a/contrib/llvm/include/llvm/IR/CallSite.h b/contrib/llvm/include/llvm/IR/CallSite.h
index 2162ccb982b0..a3e78049f4be 100644
--- a/contrib/llvm/include/llvm/IR/CallSite.h
+++ b/contrib/llvm/include/llvm/IR/CallSite.h
@@ -656,10 +656,7 @@ public:
private:
IterTy getCallee() const {
- if (isCall()) // Skip Callee
- return cast<CallInst>(getInstruction())->op_end() - 1;
- else // Skip BB, BB, Callee
- return cast<InvokeInst>(getInstruction())->op_end() - 3;
+ return cast<CallBase>(getInstruction())->op_end() - 1;
}
};
diff --git a/contrib/llvm/include/llvm/IR/CallingConv.h b/contrib/llvm/include/llvm/IR/CallingConv.h
index b9c02d7ed424..49c3be960373 100644
--- a/contrib/llvm/include/llvm/IR/CallingConv.h
+++ b/contrib/llvm/include/llvm/IR/CallingConv.h
@@ -220,6 +220,9 @@ namespace CallingConv {
/// shader if tessellation is in use, or otherwise the vertex shader.
AMDGPU_ES = 96,
+ // Calling convention between AArch64 Advanced SIMD functions
+ AArch64_VectorCall = 97,
+
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
diff --git a/contrib/llvm/include/llvm/IR/Constant.h b/contrib/llvm/include/llvm/IR/Constant.h
index 5fdf0ea00f00..98437f8eff1f 100644
--- a/contrib/llvm/include/llvm/IR/Constant.h
+++ b/contrib/llvm/include/llvm/IR/Constant.h
@@ -114,7 +114,8 @@ public:
/// For aggregates (struct/array/vector) return the constant that corresponds
/// to the specified element if possible, or null if not. This can return null
- /// if the element index is a ConstantExpr, or if 'this' is a constant expr.
+ /// if the element index is a ConstantExpr, if 'this' is a constant expr or
+ /// if the constant does not fit into an uint64_t.
Constant *getAggregateElement(unsigned Elt) const;
Constant *getAggregateElement(Constant *Elt) const;
diff --git a/contrib/llvm/include/llvm/IR/Constants.h b/contrib/llvm/include/llvm/IR/Constants.h
index f9d5ebc560c7..afc93cd61d47 100644
--- a/contrib/llvm/include/llvm/IR/Constants.h
+++ b/contrib/llvm/include/llvm/IR/Constants.h
@@ -290,7 +290,11 @@ public:
static Constant *get(Type* Ty, StringRef Str);
static ConstantFP *get(LLVMContext &Context, const APFloat &V);
- static Constant *getNaN(Type *Ty, bool Negative = false, unsigned type = 0);
+ static Constant *getNaN(Type *Ty, bool Negative = false, uint64_t Payload = 0);
+ static Constant *getQNaN(Type *Ty, bool Negative = false,
+ APInt *Payload = nullptr);
+ static Constant *getSNaN(Type *Ty, bool Negative = false,
+ APInt *Payload = nullptr);
static Constant *getNegativeZero(Type *Ty);
static Constant *getInfinity(Type *Ty, bool Negative = false);
@@ -1114,6 +1118,13 @@ public:
static Constant *getSelect(Constant *C, Constant *V1, Constant *V2,
Type *OnlyIfReducedTy = nullptr);
+ /// get - Return a unary operator constant expression,
+ /// folding if possible.
+ ///
+ /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+ static Constant *get(unsigned Opcode, Constant *C1, unsigned Flags = 0,
+ Type *OnlyIfReducedTy = nullptr);
+
/// get - Return a binary or shift operator constant expression,
/// folding if possible.
///
diff --git a/contrib/llvm/include/llvm/IR/DIBuilder.h b/contrib/llvm/include/llvm/IR/DIBuilder.h
index 06c9421ec1d6..443332b1b23c 100644
--- a/contrib/llvm/include/llvm/IR/DIBuilder.h
+++ b/contrib/llvm/include/llvm/IR/DIBuilder.h
@@ -134,8 +134,8 @@ namespace llvm {
/// \param SplitDebugInlining Whether to emit inline debug info.
/// \param DebugInfoForProfiling Whether to emit extra debug info for
/// profile collection.
- /// \param GnuPubnames Whether to emit .debug_gnu_pubnames section instead
- /// of .debug_pubnames.
+ /// \param NameTableKind Whether to emit .debug_gnu_pubnames,
+ /// .debug_pubnames, or no pubnames at all.
DICompileUnit *
createCompileUnit(unsigned Lang, DIFile *File, StringRef Producer,
bool isOptimized, StringRef Flags, unsigned RV,
@@ -144,7 +144,9 @@ namespace llvm {
DICompileUnit::DebugEmissionKind::FullDebug,
uint64_t DWOId = 0, bool SplitDebugInlining = true,
bool DebugInfoForProfiling = false,
- bool GnuPubnames = false);
+ DICompileUnit::DebugNameTableKind NameTableKind =
+ DICompileUnit::DebugNameTableKind::Default,
+ bool RangesBaseAddress = false);
/// Create a file descriptor to hold debugging information for a file.
/// \param Filename File name.
@@ -188,9 +190,11 @@ namespace llvm {
/// type.
/// \param Name Type name.
/// \param SizeInBits Size of the type.
- /// \param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float.
+ /// \param Encoding DWARF encoding code, e.g., dwarf::DW_ATE_float.
+ /// \param Flags Optional DWARF attributes, e.g., DW_AT_endianity.
DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits,
- unsigned Encoding);
+ unsigned Encoding,
+ DINode::DIFlags Flags = DINode::FlagZero);
/// Create debugging information entry for a qualified
/// type, e.g. 'const int'.
@@ -498,11 +502,11 @@ namespace llvm {
/// \param Elements Enumeration elements.
/// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
/// \param UniqueIdentifier A unique identifier for the enum.
- /// \param IsFixed Boolean flag indicate if this is C++11/ObjC fixed enum.
+ /// \param IsScoped Boolean flag indicates if this is C++11/ObjC 'enum class'.
DICompositeType *createEnumerationType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
- DIType *UnderlyingType, StringRef UniqueIdentifier = "", bool IsFixed = false);
+ DIType *UnderlyingType, StringRef UniqueIdentifier = "", bool IsScoped = false);
/// Create subroutine type.
/// \param ParameterTypes An array of subroutine parameter types. This
@@ -580,14 +584,14 @@ namespace llvm {
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DIType *Ty, bool isLocalToUnit,
DIExpression *Expr = nullptr, MDNode *Decl = nullptr,
- uint32_t AlignInBits = 0);
+ MDTuple *templateParams = nullptr, uint32_t AlignInBits = 0);
/// Identical to createGlobalVariable
/// except that the resulting DbgNode is temporary and meant to be RAUWed.
DIGlobalVariable *createTempGlobalVariableFwdDecl(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DIType *Ty, bool isLocalToUnit, MDNode *Decl = nullptr,
- uint32_t AlignInBits = 0);
+ MDTuple *templateParams = nullptr, uint32_t AlignInBits = 0);
/// Create a new descriptor for an auto variable. This is a local variable
/// that is not a subprogram parameter.
@@ -649,29 +653,28 @@ namespace llvm {
/// \param File File where this variable is defined.
/// \param LineNo Line number.
/// \param Ty Function type.
- /// \param isLocalToUnit True if this function is not externally visible.
- /// \param isDefinition True if this is a function definition.
/// \param ScopeLine Set to the beginning of the scope this starts
/// \param Flags e.g. is this function prototyped or not.
/// These flags are used to emit dwarf attributes.
- /// \param isOptimized True if optimization is ON.
+ /// \param SPFlags Additional flags specific to subprograms.
/// \param TParams Function template parameters.
/// \param ThrownTypes Exception types this function may throw.
- DISubprogram *createFunction(
- DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned ScopeLine,
- DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
- DITemplateParameterArray TParams = nullptr,
- DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
+ DISubprogram *
+ createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned LineNo, DISubroutineType *Ty,
+ unsigned ScopeLine, DINode::DIFlags Flags = DINode::FlagZero,
+ DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
+ DITemplateParameterArray TParams = nullptr,
+ DISubprogram *Decl = nullptr,
+ DITypeArray ThrownTypes = nullptr);
/// Identical to createFunction,
/// except that the resulting DbgNode is meant to be RAUWed.
DISubprogram *createTempFunctionFwdDecl(
DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned ScopeLine,
- DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+ unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
+ DINode::DIFlags Flags = DINode::FlagZero,
+ DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
DITemplateParameterArray TParams = nullptr,
DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
@@ -683,10 +686,6 @@ namespace llvm {
/// \param File File where this variable is defined.
/// \param LineNo Line number.
/// \param Ty Function type.
- /// \param isLocalToUnit True if this function is not externally visible..
- /// \param isDefinition True if this is a function definition.
- /// \param Virtuality Attributes describing virtualness. e.g. pure
- /// virtual function.
/// \param VTableIndex Index no of this method in virtual table, or -1u if
/// unrepresentable.
/// \param ThisAdjustment
@@ -695,17 +694,18 @@ namespace llvm {
/// \param VTableHolder Type that holds vtable.
/// \param Flags e.g. is this function prototyped or not.
/// This flags are used to emit dwarf attributes.
- /// \param isOptimized True if optimization is ON.
+ /// \param SPFlags Additional flags specific to subprograms.
/// \param TParams Function template parameters.
/// \param ThrownTypes Exception types this function may throw.
- DISubprogram *createMethod(
- DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned Virtuality = 0, unsigned VTableIndex = 0,
- int ThisAdjustment = 0, DIType *VTableHolder = nullptr,
- DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
- DITemplateParameterArray TParams = nullptr,
- DITypeArray ThrownTypes = nullptr);
+ DISubprogram *
+ createMethod(DIScope *Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned LineNo, DISubroutineType *Ty,
+ unsigned VTableIndex = 0, int ThisAdjustment = 0,
+ DIType *VTableHolder = nullptr,
+ DINode::DIFlags Flags = DINode::FlagZero,
+ DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
+ DITemplateParameterArray TParams = nullptr,
+ DITypeArray ThrownTypes = nullptr);
/// This creates new descriptor for a namespace with the specified
/// parent scope.
diff --git a/contrib/llvm/include/llvm/IR/DataLayout.h b/contrib/llvm/include/llvm/IR/DataLayout.h
index d796a65e6129..c144d1c13c34 100644
--- a/contrib/llvm/include/llvm/IR/DataLayout.h
+++ b/contrib/llvm/include/llvm/IR/DataLayout.h
@@ -334,6 +334,9 @@ public:
/// the backends/clients are updated.
unsigned getPointerSize(unsigned AS = 0) const;
+ /// Returns the maximum pointer size over all address spaces.
+ unsigned getMaxPointerSize() const;
+
// Index size used for address calculation.
unsigned getIndexSize(unsigned AS) const;
@@ -361,6 +364,11 @@ public:
return getPointerSize(AS) * 8;
}
+ /// Returns the maximum pointer size over all address spaces.
+ unsigned getMaxPointerSizeInBits() const {
+ return getMaxPointerSize() * 8;
+ }
+
/// Size in bits of index used for address calculation in getelementptr.
unsigned getIndexSizeInBits(unsigned AS) const {
return getIndexSize(AS) * 8;
diff --git a/contrib/llvm/include/llvm/IR/DebugInfoFlags.def b/contrib/llvm/include/llvm/IR/DebugInfoFlags.def
index b1f5fac64232..ce117aa452aa 100644
--- a/contrib/llvm/include/llvm/IR/DebugInfoFlags.def
+++ b/contrib/llvm/include/llvm/IR/DebugInfoFlags.def
@@ -11,11 +11,20 @@
//
//===----------------------------------------------------------------------===//
-// TODO: Add other DW-based macros.
+#if !(defined HANDLE_DI_FLAG || defined HANDLE_DISP_FLAG)
+#error "Missing macro definition of HANDLE_DI*"
+#endif
+
#ifndef HANDLE_DI_FLAG
-#error "Missing macro definition of HANDLE_DI_FLAG"
+#define HANDLE_DI_FLAG(ID, NAME)
#endif
+#ifndef HANDLE_DISP_FLAG
+#define HANDLE_DISP_FLAG(ID, NAME)
+#endif
+
+// General flags kept in DINode.
+
HANDLE_DI_FLAG(0, Zero) // Use it as zero value.
// For example: void foo(DIFlags Flags = FlagZero).
HANDLE_DI_FLAG(1, Private)
@@ -45,9 +54,12 @@ HANDLE_DI_FLAG((1 << 20), NoReturn)
HANDLE_DI_FLAG((1 << 21), MainSubprogram)
HANDLE_DI_FLAG((1 << 22), TypePassByValue)
HANDLE_DI_FLAG((1 << 23), TypePassByReference)
-HANDLE_DI_FLAG((1 << 24), FixedEnum)
+HANDLE_DI_FLAG((1 << 24), EnumClass)
HANDLE_DI_FLAG((1 << 25), Thunk)
HANDLE_DI_FLAG((1 << 26), Trivial)
+HANDLE_DI_FLAG((1 << 27), BigEndian)
+HANDLE_DI_FLAG((1 << 28), LittleEndian)
+HANDLE_DI_FLAG((1 << 29), AllCallsDescribed)
// To avoid needing a dedicated value for IndirectVirtualBase, we use
// the bitwise or of Virtual and FwdDecl, which does not otherwise
@@ -57,8 +69,29 @@ HANDLE_DI_FLAG((1 << 2) | (1 << 5), IndirectVirtualBase)
#ifdef DI_FLAG_LARGEST_NEEDED
// intended to be used with ADT/BitmaskEnum.h
// NOTE: always must be equal to largest flag, check this when adding new flag
-HANDLE_DI_FLAG((1 << 26), Largest)
+HANDLE_DI_FLAG((1 << 29), Largest)
#undef DI_FLAG_LARGEST_NEEDED
#endif
+// Subprogram-specific flags kept in DISubprogram.
+
+// Use this as a zero/initialization value.
+// For example: void foo(DISPFlags Flags = SPFlagZero).
+HANDLE_DISP_FLAG(0, Zero)
+// Virtuality is a two-bit enum field in the LSB of the word.
+// Values should match DW_VIRTUALITY_*.
+HANDLE_DISP_FLAG(1u, Virtual)
+HANDLE_DISP_FLAG(2u, PureVirtual)
+HANDLE_DISP_FLAG((1u << 2), LocalToUnit)
+HANDLE_DISP_FLAG((1u << 3), Definition)
+HANDLE_DISP_FLAG((1u << 4), Optimized)
+
+#ifdef DISP_FLAG_LARGEST_NEEDED
+// Intended to be used with ADT/BitmaskEnum.h.
+// NOTE: Always must be equal to largest flag, check this when adding new flags.
+HANDLE_DISP_FLAG((1 << 4), Largest)
+#undef DISP_FLAG_LARGEST_NEEDED
+#endif
+
#undef HANDLE_DI_FLAG
+#undef HANDLE_DISP_FLAG
diff --git a/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
index 820746851104..a461d1bd4fe8 100644
--- a/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -713,6 +713,8 @@ public:
bool isTypePassByReference() const {
return getFlags() & FlagTypePassByReference;
}
+ bool isBigEndian() const { return getFlags() & FlagBigEndian; }
+ bool isLittleEndian() const { return getFlags() & FlagLittleEndian; }
static bool classof(const Metadata *MD) {
switch (MD->getMetadataID()) {
@@ -739,40 +741,43 @@ class DIBasicType : public DIType {
DIBasicType(LLVMContext &C, StorageType Storage, unsigned Tag,
uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding,
- ArrayRef<Metadata *> Ops)
+ DIFlags Flags, ArrayRef<Metadata *> Ops)
: DIType(C, DIBasicTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
- FlagZero, Ops),
+ Flags, Ops),
Encoding(Encoding) {}
~DIBasicType() = default;
static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
StringRef Name, uint64_t SizeInBits,
uint32_t AlignInBits, unsigned Encoding,
- StorageType Storage, bool ShouldCreate = true) {
+ DIFlags Flags, StorageType Storage,
+ bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
- SizeInBits, AlignInBits, Encoding, Storage, ShouldCreate);
+ SizeInBits, AlignInBits, Encoding, Flags, Storage,
+ ShouldCreate);
}
static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, uint64_t SizeInBits,
uint32_t AlignInBits, unsigned Encoding,
- StorageType Storage, bool ShouldCreate = true);
+ DIFlags Flags, StorageType Storage,
+ bool ShouldCreate = true);
TempDIBasicType cloneImpl() const {
return getTemporary(getContext(), getTag(), getName(), getSizeInBits(),
- getAlignInBits(), getEncoding());
+ getAlignInBits(), getEncoding(), getFlags());
}
public:
DEFINE_MDNODE_GET(DIBasicType, (unsigned Tag, StringRef Name),
- (Tag, Name, 0, 0, 0))
+ (Tag, Name, 0, 0, 0, FlagZero))
DEFINE_MDNODE_GET(DIBasicType,
(unsigned Tag, StringRef Name, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding),
- (Tag, Name, SizeInBits, AlignInBits, Encoding))
+ uint32_t AlignInBits, unsigned Encoding, DIFlags Flags),
+ (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags))
DEFINE_MDNODE_GET(DIBasicType,
(unsigned Tag, MDString *Name, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding),
- (Tag, Name, SizeInBits, AlignInBits, Encoding))
+ uint32_t AlignInBits, unsigned Encoding, DIFlags Flags),
+ (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags))
TempDIBasicType clone() const { return cloneImpl(); }
@@ -1162,11 +1167,21 @@ public:
NoDebug = 0,
FullDebug,
LineTablesOnly,
- LastEmissionKind = LineTablesOnly
+ DebugDirectivesOnly,
+ LastEmissionKind = DebugDirectivesOnly
+ };
+
+ enum class DebugNameTableKind : unsigned {
+ Default = 0,
+ GNU = 1,
+ None = 2,
+ LastDebugNameTableKind = None
};
static Optional<DebugEmissionKind> getEmissionKind(StringRef Str);
static const char *emissionKindString(DebugEmissionKind EK);
+ static Optional<DebugNameTableKind> getNameTableKind(StringRef Str);
+ static const char *nameTableKindString(DebugNameTableKind PK);
private:
unsigned SourceLanguage;
@@ -1176,17 +1191,20 @@ private:
uint64_t DWOId;
bool SplitDebugInlining;
bool DebugInfoForProfiling;
- bool GnuPubnames;
+ unsigned NameTableKind;
+ bool RangesBaseAddress;
DICompileUnit(LLVMContext &C, StorageType Storage, unsigned SourceLanguage,
bool IsOptimized, unsigned RuntimeVersion,
unsigned EmissionKind, uint64_t DWOId, bool SplitDebugInlining,
- bool DebugInfoForProfiling, bool GnuPubnames, ArrayRef<Metadata *> Ops)
+ bool DebugInfoForProfiling, unsigned NameTableKind,
+ bool RangesBaseAddress, ArrayRef<Metadata *> Ops)
: DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops),
SourceLanguage(SourceLanguage), IsOptimized(IsOptimized),
RuntimeVersion(RuntimeVersion), EmissionKind(EmissionKind),
DWOId(DWOId), SplitDebugInlining(SplitDebugInlining),
- DebugInfoForProfiling(DebugInfoForProfiling), GnuPubnames(GnuPubnames) {
+ DebugInfoForProfiling(DebugInfoForProfiling),
+ NameTableKind(NameTableKind), RangesBaseAddress(RangesBaseAddress) {
assert(Storage != Uniqued);
}
~DICompileUnit() = default;
@@ -1200,14 +1218,16 @@ private:
DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
- bool GnuPubnames, StorageType Storage, bool ShouldCreate = true) {
- return getImpl(
- Context, SourceLanguage, File, getCanonicalMDString(Context, Producer),
- IsOptimized, getCanonicalMDString(Context, Flags), RuntimeVersion,
- getCanonicalMDString(Context, SplitDebugFilename), EmissionKind,
- EnumTypes.get(), RetainedTypes.get(), GlobalVariables.get(),
- ImportedEntities.get(), Macros.get(), DWOId, SplitDebugInlining,
- DebugInfoForProfiling, GnuPubnames, Storage, ShouldCreate);
+ unsigned NameTableKind, bool RangesBaseAddress, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, SourceLanguage, File,
+ getCanonicalMDString(Context, Producer), IsOptimized,
+ getCanonicalMDString(Context, Flags), RuntimeVersion,
+ getCanonicalMDString(Context, SplitDebugFilename),
+ EmissionKind, EnumTypes.get(), RetainedTypes.get(),
+ GlobalVariables.get(), ImportedEntities.get(), Macros.get(),
+ DWOId, SplitDebugInlining, DebugInfoForProfiling,
+ NameTableKind, RangesBaseAddress, Storage, ShouldCreate);
}
static DICompileUnit *
getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
@@ -1216,17 +1236,17 @@ private:
unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
Metadata *GlobalVariables, Metadata *ImportedEntities,
Metadata *Macros, uint64_t DWOId, bool SplitDebugInlining,
- bool DebugInfoForProfiling, bool GnuPubnames, StorageType Storage,
- bool ShouldCreate = true);
+ bool DebugInfoForProfiling, unsigned NameTableKind,
+ bool RangesBaseAddress, StorageType Storage, bool ShouldCreate = true);
TempDICompileUnit cloneImpl() const {
- return getTemporary(getContext(), getSourceLanguage(), getFile(),
- getProducer(), isOptimized(), getFlags(),
- getRuntimeVersion(), getSplitDebugFilename(),
- getEmissionKind(), getEnumTypes(), getRetainedTypes(),
- getGlobalVariables(), getImportedEntities(),
- getMacros(), DWOId, getSplitDebugInlining(),
- getDebugInfoForProfiling(), getGnuPubnames());
+ return getTemporary(
+ getContext(), getSourceLanguage(), getFile(), getProducer(),
+ isOptimized(), getFlags(), getRuntimeVersion(), getSplitDebugFilename(),
+ getEmissionKind(), getEnumTypes(), getRetainedTypes(),
+ getGlobalVariables(), getImportedEntities(), getMacros(), DWOId,
+ getSplitDebugInlining(), getDebugInfoForProfiling(), getNameTableKind(),
+ getRangesBaseAddress());
}
public:
@@ -1242,11 +1262,11 @@ public:
DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
- bool GnuPubnames),
+ DebugNameTableKind NameTableKind, bool RangesBaseAddress),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
- DebugInfoForProfiling, GnuPubnames))
+ DebugInfoForProfiling, (unsigned)NameTableKind, RangesBaseAddress))
DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
DICompileUnit,
(unsigned SourceLanguage, Metadata *File, MDString *Producer,
@@ -1254,11 +1274,12 @@ public:
MDString *SplitDebugFilename, unsigned EmissionKind, Metadata *EnumTypes,
Metadata *RetainedTypes, Metadata *GlobalVariables,
Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
- bool SplitDebugInlining, bool DebugInfoForProfiling, bool GnuPubnames),
+ bool SplitDebugInlining, bool DebugInfoForProfiling,
+ unsigned NameTableKind, bool RangesBaseAddress),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
- DebugInfoForProfiling, GnuPubnames))
+ DebugInfoForProfiling, NameTableKind, RangesBaseAddress))
TempDICompileUnit clone() const { return cloneImpl(); }
@@ -1268,11 +1289,21 @@ public:
DebugEmissionKind getEmissionKind() const {
return (DebugEmissionKind)EmissionKind;
}
+ bool isDebugDirectivesOnly() const {
+ return EmissionKind == DebugDirectivesOnly;
+ }
bool getDebugInfoForProfiling() const { return DebugInfoForProfiling; }
- bool getGnuPubnames() const { return GnuPubnames; }
- StringRef getProducer() const { return getStringOperand(1); }
- StringRef getFlags() const { return getStringOperand(2); }
- StringRef getSplitDebugFilename() const { return getStringOperand(3); }
+ DebugNameTableKind getNameTableKind() const {
+ return (DebugNameTableKind)NameTableKind;
+ }
+ bool getRangesBaseAddress() const {
+ return RangesBaseAddress; }
+ StringRef getProducer() const {
+ return getStringOperand(1); }
+ StringRef getFlags() const {
+ return getStringOperand(2); }
+ StringRef getSplitDebugFilename() const {
+ return getStringOperand(3); }
DICompositeTypeArray getEnumTypes() const {
return cast_or_null<MDTuple>(getRawEnumTypes());
}
@@ -1372,19 +1403,20 @@ class DILocation : public MDNode {
friend class MDNode;
DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
- unsigned Column, ArrayRef<Metadata *> MDs);
+ unsigned Column, ArrayRef<Metadata *> MDs, bool ImplicitCode);
~DILocation() { dropAllReferences(); }
static DILocation *getImpl(LLVMContext &Context, unsigned Line,
unsigned Column, Metadata *Scope,
- Metadata *InlinedAt, StorageType Storage,
- bool ShouldCreate = true);
+ Metadata *InlinedAt, bool ImplicitCode,
+ StorageType Storage, bool ShouldCreate = true);
static DILocation *getImpl(LLVMContext &Context, unsigned Line,
unsigned Column, DILocalScope *Scope,
- DILocation *InlinedAt, StorageType Storage,
- bool ShouldCreate = true) {
+ DILocation *InlinedAt, bool ImplicitCode,
+ StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Line, Column, static_cast<Metadata *>(Scope),
- static_cast<Metadata *>(InlinedAt), Storage, ShouldCreate);
+ static_cast<Metadata *>(InlinedAt), ImplicitCode, Storage,
+ ShouldCreate);
}
/// With a given unsigned int \p U, use up to 13 bits to represent it.
@@ -1398,6 +1430,9 @@ class DILocation : public MDNode {
/// Reverse transformation as getPrefixEncodingFromUnsigned.
static unsigned getUnsignedFromPrefixEncoding(unsigned U) {
+ if (U & 1)
+ return 0;
+ U >>= 1;
return (U & 0x20) ? (((U >> 1) & 0xfe0) | (U & 0x1f)) : (U & 0x1f);
}
@@ -1413,7 +1448,15 @@ class DILocation : public MDNode {
// Get the raw scope/inlinedAt since it is possible to invoke this on
// a DILocation containing temporary metadata.
return getTemporary(getContext(), getLine(), getColumn(), getRawScope(),
- getRawInlinedAt());
+ getRawInlinedAt(), isImplicitCode());
+ }
+
+ static unsigned encodeComponent(unsigned C) {
+ return (C == 0) ? 1U : (getPrefixEncodingFromUnsigned(C) << 1);
+ }
+
+ static unsigned encodingBits(unsigned C) {
+ return (C == 0) ? 1 : (C > 0x1f ? 14 : 7);
}
public:
@@ -1422,12 +1465,13 @@ public:
DEFINE_MDNODE_GET(DILocation,
(unsigned Line, unsigned Column, Metadata *Scope,
- Metadata *InlinedAt = nullptr),
- (Line, Column, Scope, InlinedAt))
+ Metadata *InlinedAt = nullptr, bool ImplicitCode = false),
+ (Line, Column, Scope, InlinedAt, ImplicitCode))
DEFINE_MDNODE_GET(DILocation,
(unsigned Line, unsigned Column, DILocalScope *Scope,
- DILocation *InlinedAt = nullptr),
- (Line, Column, Scope, InlinedAt))
+ DILocation *InlinedAt = nullptr,
+ bool ImplicitCode = false),
+ (Line, Column, Scope, InlinedAt, ImplicitCode))
/// Return a (temporary) clone of this.
TempDILocation clone() const { return cloneImpl(); }
@@ -1440,6 +1484,15 @@ public:
return cast_or_null<DILocation>(getRawInlinedAt());
}
+ /// Check if the location corresponds to an implicit code.
+ /// When the ImplicitCode flag is true, it means that the Instruction
+ /// with this DILocation has been added by the front-end but it hasn't been
+ /// written explicitly by the user (e.g. cleanup stuff in C++ put on a closing
+ /// bracket). It's useful for code coverage to not show a counter on "empty"
+ /// lines.
+ bool isImplicitCode() const { return ImplicitCode; }
+ void setImplicitCode(bool ImplicitCode) { this->ImplicitCode = ImplicitCode; }
+
DIFile *getFile() const { return getScope()->getFile(); }
StringRef getFilename() const { return getScope()->getFilename(); }
StringRef getDirectory() const { return getScope()->getDirectory(); }
@@ -1455,19 +1508,6 @@ public:
return getScope();
}
- /// Check whether this can be discriminated from another location.
- ///
- /// Check \c this can be discriminated from \c RHS in a linetable entry.
- /// Scope and inlined-at chains are not recorded in the linetable, so they
- /// cannot be used to distinguish basic blocks.
- bool canDiscriminate(const DILocation &RHS) const {
- return getLine() != RHS.getLine() ||
- getColumn() != RHS.getColumn() ||
- getDiscriminator() != RHS.getDiscriminator() ||
- getFilename() != RHS.getFilename() ||
- getDirectory() != RHS.getDirectory();
- }
-
/// Get the DWARF discriminator.
///
/// DWARF discriminators distinguish identical file locations between
@@ -1489,20 +1529,35 @@ public:
/// order. If the lowest bit is 1, the current component is empty, and the
/// next component will start in the next bit. Otherwise, the current
/// component is non-empty, and its content starts in the next bit. The
- /// length of each components is either 5 bit or 12 bit: if the 7th bit
+ /// value of each components is either 5 bit or 12 bit: if the 7th bit
/// is 0, the bit 2~6 (5 bits) are used to represent the component; if the
/// 7th bit is 1, the bit 2~6 (5 bits) and 8~14 (7 bits) are combined to
- /// represent the component.
+ /// represent the component. Thus, the number of bits used for a component
+ /// is either 0 (if it and all the next components are empty); 1 - if it is
+ /// empty; 7 - if its value is up to and including 0x1f (lsb and msb are both
+ /// 0); or 14, if its value is up to and including 0x1ff. Note that the last
+ /// component is also capped at 0x1ff, even in the case when both first
+ /// components are 0, and we'd technically have 29 bits available.
+ ///
+ /// For precise control over the data being encoded in the discriminator,
+ /// use encodeDiscriminator/decodeDiscriminator.
+ ///
+ /// Use {get|set}BaseDiscriminator and cloneWithDuplicationFactor after reading
+ /// their documentation, as their behavior has side-effects.
inline unsigned getDiscriminator() const;
/// Returns a new DILocation with updated \p Discriminator.
inline const DILocation *cloneWithDiscriminator(unsigned Discriminator) const;
- /// Returns a new DILocation with updated base discriminator \p BD.
- inline const DILocation *setBaseDiscriminator(unsigned BD) const;
+ /// Returns a new DILocation with updated base discriminator \p BD. Only the
+ /// base discriminator is set in the new DILocation, the other encoded values
+ /// are elided.
+ /// If the discriminator cannot be encoded, the function returns None.
+ inline Optional<const DILocation *> setBaseDiscriminator(unsigned BD) const;
- /// Returns the duplication factor stored in the discriminator.
+ /// Returns the duplication factor stored in the discriminator, or 1 if no
+ /// duplication factor (or 0) is encoded.
inline unsigned getDuplicationFactor() const;
/// Returns the copy identifier stored in the discriminator.
@@ -1511,11 +1566,11 @@ public:
/// Returns the base discriminator stored in the discriminator.
inline unsigned getBaseDiscriminator() const;
- /// Returns a new DILocation with duplication factor \p DF encoded in the
- /// discriminator.
- inline const DILocation *cloneWithDuplicationFactor(unsigned DF) const;
-
- enum { NoGeneratedLocation = false, WithGeneratedLocation = true };
+ /// Returns a new DILocation with duplication factor \p DF * current
+ /// duplication factor encoded in the discriminator. The current duplication
+ /// factor is as defined by getDuplicationFactor().
+ /// Returns None if encoding failed.
+ inline Optional<const DILocation *> cloneWithDuplicationFactor(unsigned DF) const;
/// When two instructions are combined into a single instruction we also
/// need to combine the original locations into a single location.
@@ -1531,25 +1586,36 @@ public:
///
/// \p GenerateLocation: Whether the merged location can be generated when
/// \p LocA and \p LocB differ.
- static const DILocation *
- getMergedLocation(const DILocation *LocA, const DILocation *LocB,
- bool GenerateLocation = NoGeneratedLocation);
+ static const DILocation *getMergedLocation(const DILocation *LocA,
+ const DILocation *LocB);
/// Returns the base discriminator for a given encoded discriminator \p D.
static unsigned getBaseDiscriminatorFromDiscriminator(unsigned D) {
- if ((D & 1) == 0)
- return getUnsignedFromPrefixEncoding(D >> 1);
- else
- return 0;
+ return getUnsignedFromPrefixEncoding(D);
}
- /// Returns the duplication factor for a given encoded discriminator \p D.
+ /// Raw encoding of the discriminator. APIs such as setBaseDiscriminator or
+ /// cloneWithDuplicationFactor have certain side-effects. This API, in
+ /// conjunction with cloneWithDiscriminator, may be used to encode precisely
+ /// the values provided. \p BD: base discriminator \p DF: duplication factor
+ /// \p CI: copy index
+ /// The return is None if the values cannot be encoded in 32 bits - for
+ /// example, values for BD or DF larger than 12 bits. Otherwise, the return
+ /// is the encoded value.
+ static Optional<unsigned> encodeDiscriminator(unsigned BD, unsigned DF, unsigned CI);
+
+ /// Raw decoder for values in an encoded discriminator D.
+ static void decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
+ unsigned &CI);
+
+ /// Returns the duplication factor for a given encoded discriminator \p D, or
+ /// 1 if no value or 0 is encoded.
static unsigned getDuplicationFactorFromDiscriminator(unsigned D) {
D = getNextComponentInDiscriminator(D);
- if (D == 0 || (D & 1))
+ unsigned Ret = getUnsignedFromPrefixEncoding(D);
+ if (Ret == 0)
return 1;
- else
- return getUnsignedFromPrefixEncoding(D >> 1);
+ return Ret;
}
/// Returns the copy identifier for a given encoded discriminator \p D.
@@ -1588,102 +1654,118 @@ class DISubprogram : public DILocalScope {
/// negative.
int ThisAdjustment;
- // Virtuality can only assume three values, so we can pack
- // in 2 bits (none/pure/pure_virtual).
- unsigned Virtuality : 2;
+public:
+ /// Debug info subprogram flags.
+ enum DISPFlags : uint32_t {
+#define HANDLE_DISP_FLAG(ID, NAME) SPFlag##NAME = ID,
+#define DISP_FLAG_LARGEST_NEEDED
+#include "llvm/IR/DebugInfoFlags.def"
+ SPFlagNonvirtual = SPFlagZero,
+ SPFlagVirtuality = SPFlagVirtual | SPFlagPureVirtual,
+ LLVM_MARK_AS_BITMASK_ENUM(SPFlagLargest)
+ };
- // These are boolean flags so one bit is enough.
- // MSVC starts a new container field every time the base
- // type changes so we can't use 'bool' to ensure these bits
- // are packed.
- unsigned IsLocalToUnit : 1;
- unsigned IsDefinition : 1;
- unsigned IsOptimized : 1;
+ static DISPFlags getFlag(StringRef Flag);
+ static StringRef getFlagString(DISPFlags Flag);
- unsigned Padding : 3;
+ /// Split up a flags bitfield for easier printing.
+ ///
+ /// Split \c Flags into \c SplitFlags, a vector of its components. Returns
+ /// any remaining (unrecognized) bits.
+ static DISPFlags splitFlags(DISPFlags Flags,
+ SmallVectorImpl<DISPFlags> &SplitFlags);
+
+ // Helper for converting old bitfields to new flags word.
+ static DISPFlags toSPFlags(bool IsLocalToUnit, bool IsDefinition,
+ bool IsOptimized,
+ unsigned Virtuality = SPFlagNonvirtual) {
+ // We're assuming virtuality is the low-order field.
+ static_assert(
+ int(SPFlagVirtual) == int(dwarf::DW_VIRTUALITY_virtual) &&
+ int(SPFlagPureVirtual) == int(dwarf::DW_VIRTUALITY_pure_virtual),
+ "Virtuality constant mismatch");
+ return static_cast<DISPFlags>(
+ (Virtuality & SPFlagVirtuality) |
+ (IsLocalToUnit ? SPFlagLocalToUnit : SPFlagZero) |
+ (IsDefinition ? SPFlagDefinition : SPFlagZero) |
+ (IsOptimized ? SPFlagOptimized : SPFlagZero));
+ }
+private:
DIFlags Flags;
+ DISPFlags SPFlags;
DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
- unsigned ScopeLine, unsigned Virtuality, unsigned VirtualIndex,
- int ThisAdjustment, DIFlags Flags, bool IsLocalToUnit,
- bool IsDefinition, bool IsOptimized, ArrayRef<Metadata *> Ops)
+ unsigned ScopeLine, unsigned VirtualIndex, int ThisAdjustment,
+ DIFlags Flags, DISPFlags SPFlags, ArrayRef<Metadata *> Ops)
: DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram,
Ops),
Line(Line), ScopeLine(ScopeLine), VirtualIndex(VirtualIndex),
- ThisAdjustment(ThisAdjustment), Virtuality(Virtuality),
- IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition),
- IsOptimized(IsOptimized), Flags(Flags) {
+ ThisAdjustment(ThisAdjustment), Flags(Flags), SPFlags(SPFlags) {
static_assert(dwarf::DW_VIRTUALITY_max < 4, "Virtuality out of range");
- assert(Virtuality < 4 && "Virtuality out of range");
}
~DISubprogram() = default;
static DISubprogram *
getImpl(LLVMContext &Context, DIScopeRef Scope, StringRef Name,
StringRef LinkageName, DIFile *File, unsigned Line,
- DISubroutineType *Type, bool IsLocalToUnit, bool IsDefinition,
- unsigned ScopeLine, DITypeRef ContainingType, unsigned Virtuality,
+ DISubroutineType *Type, unsigned ScopeLine, DITypeRef ContainingType,
unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
- bool IsOptimized, DICompileUnit *Unit,
+ DISPFlags SPFlags, DICompileUnit *Unit,
DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
DINodeArray RetainedNodes, DITypeArray ThrownTypes,
StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
getCanonicalMDString(Context, LinkageName), File, Line, Type,
- IsLocalToUnit, IsDefinition, ScopeLine, ContainingType,
- Virtuality, VirtualIndex, ThisAdjustment, Flags, IsOptimized,
- Unit, TemplateParams.get(), Declaration, RetainedNodes.get(),
- ThrownTypes.get(), Storage, ShouldCreate);
+ ScopeLine, ContainingType, VirtualIndex, ThisAdjustment,
+ Flags, SPFlags, Unit, TemplateParams.get(), Declaration,
+ RetainedNodes.get(), ThrownTypes.get(), Storage,
+ ShouldCreate);
}
- static DISubprogram *
- getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
- MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
- bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
- Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
- int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
- Metadata *TemplateParams, Metadata *Declaration, Metadata *RetainedNodes,
- Metadata *ThrownTypes, StorageType Storage, bool ShouldCreate = true);
+ static DISubprogram *getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, MDString *LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ unsigned ScopeLine, Metadata *ContainingType,
+ unsigned VirtualIndex, int ThisAdjustment,
+ DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
+ Metadata *TemplateParams, Metadata *Declaration,
+ Metadata *RetainedNodes, Metadata *ThrownTypes,
+ StorageType Storage, bool ShouldCreate = true);
TempDISubprogram cloneImpl() const {
return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
- getFile(), getLine(), getType(), isLocalToUnit(),
- isDefinition(), getScopeLine(), getContainingType(),
- getVirtuality(), getVirtualIndex(), getThisAdjustment(),
- getFlags(), isOptimized(), getUnit(),
- getTemplateParams(), getDeclaration(), getRetainedNodes(),
- getThrownTypes());
+ getFile(), getLine(), getType(), getScopeLine(),
+ getContainingType(), getVirtualIndex(),
+ getThisAdjustment(), getFlags(), getSPFlags(),
+ getUnit(), getTemplateParams(), getDeclaration(),
+ getRetainedNodes(), getThrownTypes());
}
public:
- DEFINE_MDNODE_GET(DISubprogram,
- (DIScopeRef Scope, StringRef Name, StringRef LinkageName,
- DIFile *File, unsigned Line, DISubroutineType *Type,
- bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
- DITypeRef ContainingType, unsigned Virtuality,
- unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
- bool IsOptimized, DICompileUnit *Unit,
- DITemplateParameterArray TemplateParams = nullptr,
- DISubprogram *Declaration = nullptr,
- DINodeArray RetainedNodes = nullptr,
- DITypeArray ThrownTypes = nullptr),
- (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, ScopeLine, ContainingType, Virtuality,
- VirtualIndex, ThisAdjustment, Flags, IsOptimized, Unit,
- TemplateParams, Declaration, RetainedNodes, ThrownTypes))
+ DEFINE_MDNODE_GET(
+ DISubprogram,
+ (DIScopeRef Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned Line, DISubroutineType *Type, unsigned ScopeLine,
+ DITypeRef ContainingType, unsigned VirtualIndex, int ThisAdjustment,
+ DIFlags Flags, DISPFlags SPFlags, DICompileUnit *Unit,
+ DITemplateParameterArray TemplateParams = nullptr,
+ DISubprogram *Declaration = nullptr, DINodeArray RetainedNodes = nullptr,
+ DITypeArray ThrownTypes = nullptr),
+ (Scope, Name, LinkageName, File, Line, Type, ScopeLine, ContainingType,
+ VirtualIndex, ThisAdjustment, Flags, SPFlags, Unit, TemplateParams,
+ Declaration, RetainedNodes, ThrownTypes))
+
DEFINE_MDNODE_GET(
DISubprogram,
(Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
- unsigned Line, Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
- unsigned ScopeLine, Metadata *ContainingType, unsigned Virtuality,
- unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
- bool IsOptimized, Metadata *Unit, Metadata *TemplateParams = nullptr,
- Metadata *Declaration = nullptr, Metadata *RetainedNodes = nullptr,
- Metadata *ThrownTypes = nullptr),
- (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
- ScopeLine, ContainingType, Virtuality, VirtualIndex, ThisAdjustment,
- Flags, IsOptimized, Unit, TemplateParams, Declaration, RetainedNodes,
- ThrownTypes))
+ unsigned Line, Metadata *Type, unsigned ScopeLine,
+ Metadata *ContainingType, unsigned VirtualIndex, int ThisAdjustment,
+ DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
+ Metadata *TemplateParams = nullptr, Metadata *Declaration = nullptr,
+ Metadata *RetainedNodes = nullptr, Metadata *ThrownTypes = nullptr),
+ (Scope, Name, LinkageName, File, Line, Type, ScopeLine, ContainingType,
+ VirtualIndex, ThisAdjustment, Flags, SPFlags, Unit, TemplateParams,
+ Declaration, RetainedNodes, ThrownTypes))
TempDISubprogram clone() const { return cloneImpl(); }
@@ -1696,14 +1778,15 @@ public:
public:
unsigned getLine() const { return Line; }
- unsigned getVirtuality() const { return Virtuality; }
+ unsigned getVirtuality() const { return getSPFlags() & SPFlagVirtuality; }
unsigned getVirtualIndex() const { return VirtualIndex; }
int getThisAdjustment() const { return ThisAdjustment; }
unsigned getScopeLine() const { return ScopeLine; }
DIFlags getFlags() const { return Flags; }
- bool isLocalToUnit() const { return IsLocalToUnit; }
- bool isDefinition() const { return IsDefinition; }
- bool isOptimized() const { return IsOptimized; }
+ DISPFlags getSPFlags() const { return SPFlags; }
+ bool isLocalToUnit() const { return getSPFlags() & SPFlagLocalToUnit; }
+ bool isDefinition() const { return getSPFlags() & SPFlagDefinition; }
+ bool isOptimized() const { return getSPFlags() & SPFlagOptimized; }
bool isArtificial() const { return getFlags() & FlagArtificial; }
bool isPrivate() const {
@@ -1717,6 +1800,9 @@ public:
}
bool isExplicit() const { return getFlags() & FlagExplicit; }
bool isPrototyped() const { return getFlags() & FlagPrototyped; }
+ bool areAllCallsDescribed() const {
+ return getFlags() & FlagAllCallsDescribed;
+ }
bool isMainSubprogram() const { return getFlags() & FlagMainSubprogram; }
/// Check if this is reference-qualified.
@@ -1953,28 +2039,24 @@ unsigned DILocation::getCopyIdentifier() const {
return getCopyIdentifierFromDiscriminator(getDiscriminator());
}
-const DILocation *DILocation::setBaseDiscriminator(unsigned D) const {
+Optional<const DILocation *> DILocation::setBaseDiscriminator(unsigned D) const {
if (D == 0)
return this;
- else
- return cloneWithDiscriminator(getPrefixEncodingFromUnsigned(D) << 1);
+ if (D > 0xfff)
+ return None;
+ return cloneWithDiscriminator(encodeComponent(D));
}
-const DILocation *DILocation::cloneWithDuplicationFactor(unsigned DF) const {
+Optional<const DILocation *> DILocation::cloneWithDuplicationFactor(unsigned DF) const {
DF *= getDuplicationFactor();
if (DF <= 1)
return this;
unsigned BD = getBaseDiscriminator();
- unsigned CI = getCopyIdentifier() << (DF > 0x1f ? 14 : 7);
- unsigned D = CI | (getPrefixEncodingFromUnsigned(DF) << 1);
-
- if (BD == 0)
- D = (D << 1) | 1;
- else
- D = (D << (BD > 0x1f ? 14 : 7)) | (getPrefixEncodingFromUnsigned(BD) << 1);
-
- return cloneWithDiscriminator(D);
+ unsigned CI = getCopyIdentifier();
+ if (Optional<unsigned> D = encodeDiscriminator(BD, DF, CI))
+ return cloneWithDiscriminator(*D);
+ return None;
}
class DINamespace : public DIScope {
@@ -2515,30 +2597,30 @@ class DIGlobalVariable : public DIVariable {
IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
~DIGlobalVariable() = default;
- static DIGlobalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
- StringRef Name, StringRef LinkageName,
- DIFile *File, unsigned Line, DITypeRef Type,
- bool IsLocalToUnit, bool IsDefinition,
- DIDerivedType *StaticDataMemberDeclaration,
- uint32_t AlignInBits, StorageType Storage,
- bool ShouldCreate = true) {
+ static DIGlobalVariable *
+ getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
+ StringRef LinkageName, DIFile *File, unsigned Line, DITypeRef Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ DIDerivedType *StaticDataMemberDeclaration, MDTuple *TemplateParams,
+ uint32_t AlignInBits, StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
getCanonicalMDString(Context, LinkageName), File, Line, Type,
IsLocalToUnit, IsDefinition, StaticDataMemberDeclaration,
- AlignInBits, Storage, ShouldCreate);
+ cast_or_null<Metadata>(TemplateParams), AlignInBits, Storage,
+ ShouldCreate);
}
static DIGlobalVariable *
getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
bool IsLocalToUnit, bool IsDefinition,
- Metadata *StaticDataMemberDeclaration, uint32_t AlignInBits,
- StorageType Storage, bool ShouldCreate = true);
+ Metadata *StaticDataMemberDeclaration, Metadata *TemplateParams,
+ uint32_t AlignInBits, StorageType Storage, bool ShouldCreate = true);
TempDIGlobalVariable cloneImpl() const {
return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
getFile(), getLine(), getType(), isLocalToUnit(),
isDefinition(), getStaticDataMemberDeclaration(),
- getAlignInBits());
+ getTemplateParams(), getAlignInBits());
}
public:
@@ -2547,17 +2629,19 @@ public:
DIFile *File, unsigned Line, DITypeRef Type,
bool IsLocalToUnit, bool IsDefinition,
DIDerivedType *StaticDataMemberDeclaration,
- uint32_t AlignInBits),
+ MDTuple *TemplateParams, uint32_t AlignInBits),
(Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+ IsDefinition, StaticDataMemberDeclaration, TemplateParams,
+ AlignInBits))
DEFINE_MDNODE_GET(DIGlobalVariable,
(Metadata * Scope, MDString *Name, MDString *LinkageName,
Metadata *File, unsigned Line, Metadata *Type,
bool IsLocalToUnit, bool IsDefinition,
Metadata *StaticDataMemberDeclaration,
- uint32_t AlignInBits),
+ Metadata *TemplateParams, uint32_t AlignInBits),
(Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+ IsDefinition, StaticDataMemberDeclaration, TemplateParams,
+ AlignInBits))
TempDIGlobalVariable clone() const { return cloneImpl(); }
@@ -2571,6 +2655,8 @@ public:
MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(6); }
+ Metadata *getRawTemplateParams() const { return getOperand(7); }
+ MDTuple *getTemplateParams() const { return getOperandAs<MDTuple>(7); }
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DIGlobalVariableKind;
diff --git a/contrib/llvm/include/llvm/IR/DebugLoc.h b/contrib/llvm/include/llvm/IR/DebugLoc.h
index 9f619ffc5c4d..4f0d7f51b5f9 100644
--- a/contrib/llvm/include/llvm/IR/DebugLoc.h
+++ b/contrib/llvm/include/llvm/IR/DebugLoc.h
@@ -78,7 +78,8 @@ namespace llvm {
///
/// FIXME: Remove this. Users should use DILocation::get().
static DebugLoc get(unsigned Line, unsigned Col, const MDNode *Scope,
- const MDNode *InlinedAt = nullptr);
+ const MDNode *InlinedAt = nullptr,
+ bool ImplicitCode = false);
enum { ReplaceLastInlinedAt = true };
/// Rebuild the entire inlined-at chain for this instruction so that the top of
@@ -112,6 +113,10 @@ namespace llvm {
/// Return \c this as a bar \a MDNode.
MDNode *getAsMDNode() const { return Loc; }
+ /// Check if the DebugLoc corresponds to an implicit code.
+ bool isImplicitCode() const;
+ void setImplicitCode(bool ImplicitCode);
+
bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }
diff --git a/contrib/llvm/include/llvm/IR/DiagnosticInfo.h b/contrib/llvm/include/llvm/IR/DiagnosticInfo.h
index 81d4ae84bf01..3a55a7dca7f4 100644
--- a/contrib/llvm/include/llvm/IR/DiagnosticInfo.h
+++ b/contrib/llvm/include/llvm/IR/DiagnosticInfo.h
@@ -101,6 +101,7 @@ private:
/// Severity gives the severity of the diagnostic.
const DiagnosticSeverity Severity;
+ virtual void anchor();
public:
DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
: Kind(Kind), Severity(Severity) {}
@@ -210,6 +211,7 @@ public:
};
class DiagnosticInfoStackSize : public DiagnosticInfoResourceLimit {
+ virtual void anchor() override;
public:
DiagnosticInfoStackSize(const Function &Fn, uint64_t StackSize,
DiagnosticSeverity Severity = DS_Warning,
@@ -340,7 +342,7 @@ private:
};
class DiagnosticLocation {
- StringRef Filename;
+ DIFile *File = nullptr;
unsigned Line = 0;
unsigned Column = 0;
@@ -349,14 +351,18 @@ public:
DiagnosticLocation(const DebugLoc &DL);
DiagnosticLocation(const DISubprogram *SP);
- bool isValid() const { return !Filename.empty(); }
- StringRef getFilename() const { return Filename; }
+ bool isValid() const { return File; }
+ /// Return the full path to the file.
+ std::string getAbsolutePath() const;
+ /// Return the file name relative to the compilation directory.
+ StringRef getRelativePath() const;
unsigned getLine() const { return Line; }
unsigned getColumn() const { return Column; }
};
/// Common features for diagnostics with an associated location.
class DiagnosticInfoWithLocationBase : public DiagnosticInfo {
+ virtual void anchor() override;
public:
/// \p Fn is the function where the diagnostic is being emitted. \p Loc is
/// the location information to use in the diagnostic.
@@ -375,9 +381,13 @@ public:
const std::string getLocationStr() const;
/// Return location information for this diagnostic in three parts:
- /// the source file name, line number and column.
- void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const;
+ /// the relative source file path, line number and column.
+ void getLocation(StringRef &RelativePath, unsigned &Line,
+ unsigned &Column) const;
+ /// Return the absolute path tot the file.
+ std::string getAbsolutePath() const;
+
const Function &getFunction() const { return Fn; }
DiagnosticLocation getLocation() const { return Loc; }
@@ -414,6 +424,7 @@ public:
Argument(StringRef Key, const Value *V);
Argument(StringRef Key, const Type *T);
Argument(StringRef Key, StringRef S);
+ Argument(StringRef Key, const char *S) : Argument(Key, StringRef(S)) {};
Argument(StringRef Key, int N);
Argument(StringRef Key, float N);
Argument(StringRef Key, long N);
@@ -590,6 +601,7 @@ operator<<(RemarkT &R,
/// Common features for diagnostics dealing with optimization remarks
/// that are used by IR passes.
class DiagnosticInfoIROptimization : public DiagnosticInfoOptimizationBase {
+ virtual void anchor() override;
public:
/// \p PassName is the name of the pass emitting this diagnostic. \p
/// RemarkName is a textual identifier for the remark (single-word,
@@ -810,6 +822,7 @@ private:
/// Diagnostic information for optimization analysis remarks related to
/// floating-point non-commutativity.
class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
+ virtual void anchor();
public:
/// \p PassName is the name of the pass emitting this diagnostic. If this name
/// matches the regular expression given in -Rpass-analysis=, then the
@@ -851,6 +864,7 @@ private:
/// Diagnostic information for optimization analysis remarks related to
/// pointer aliasing.
class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
+ virtual void anchor();
public:
/// \p PassName is the name of the pass emitting this diagnostic. If this name
/// matches the regular expression given in -Rpass-analysis=, then the
diff --git a/contrib/llvm/include/llvm/IR/DomTreeUpdater.h b/contrib/llvm/include/llvm/IR/DomTreeUpdater.h
index 81ba670ac0f5..e5bb092d21ca 100644
--- a/contrib/llvm/include/llvm/IR/DomTreeUpdater.h
+++ b/contrib/llvm/include/llvm/IR/DomTreeUpdater.h
@@ -159,11 +159,9 @@ public:
void callbackDeleteBB(BasicBlock *DelBB,
std::function<void(BasicBlock *)> Callback);
- /// Recalculate all available trees.
- /// Under Lazy Strategy, available trees will only be recalculated if there
- /// are pending updates or there is BasicBlock awaiting deletion. Returns true
- /// if at least one tree is recalculated.
- bool recalculate(Function &F);
+ /// Recalculate all available trees and flush all BasicBlocks
+ /// awaiting deletion immediately.
+ void recalculate(Function &F);
/// Flush DomTree updates and return DomTree.
/// It also flush out of date updates applied by all available trees
diff --git a/contrib/llvm/include/llvm/IR/Dominators.h b/contrib/llvm/include/llvm/IR/Dominators.h
index f9e992b0ef0c..f7da47d07663 100644
--- a/contrib/llvm/include/llvm/IR/Dominators.h
+++ b/contrib/llvm/include/llvm/IR/Dominators.h
@@ -37,15 +37,18 @@ extern template class DomTreeNodeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock, false>; // DomTree
extern template class DominatorTreeBase<BasicBlock, true>; // PostDomTree
+extern template class cfg::Update<BasicBlock *>;
+
namespace DomTreeBuilder {
using BBDomTree = DomTreeBase<BasicBlock>;
using BBPostDomTree = PostDomTreeBase<BasicBlock>;
-extern template struct Update<BasicBlock *>;
-
-using BBUpdates = ArrayRef<Update<BasicBlock *>>;
+using BBUpdates = ArrayRef<llvm::cfg::Update<BasicBlock *>>;
extern template void Calculate<BBDomTree>(BBDomTree &DT);
+extern template void CalculateWithUpdates<BBDomTree>(BBDomTree &DT,
+ BBUpdates U);
+
extern template void Calculate<BBPostDomTree>(BBPostDomTree &DT);
extern template void InsertEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
@@ -145,6 +148,9 @@ class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
DominatorTree() = default;
explicit DominatorTree(Function &F) { recalculate(F); }
+ explicit DominatorTree(DominatorTree &DT, DomTreeBuilder::BBUpdates U) {
+ recalculate(*DT.Parent, U);
+ }
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,
@@ -276,94 +282,6 @@ public:
void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
-
-//===-------------------------------------
-/// Class to defer updates to a DominatorTree.
-///
-/// Definition: Applying updates to every edge insertion and deletion is
-/// expensive and not necessary. When one needs the DominatorTree for analysis
-/// they can request a flush() to perform a larger batch update. This has the
-/// advantage of the DominatorTree inspecting the set of updates to find
-/// duplicates or unnecessary subtree updates.
-///
-/// The scope of DeferredDominance operates at a Function level.
-///
-/// It is not necessary for the user to scrub the updates for duplicates or
-/// updates that point to the same block (Delete, BB_A, BB_A). Performance
-/// can be gained if the caller attempts to batch updates before submitting
-/// to applyUpdates(ArrayRef) in cases where duplicate edge requests will
-/// occur.
-///
-/// It is required for the state of the LLVM IR to be applied *before*
-/// submitting updates. The update routines must analyze the current state
-/// between a pair of (From, To) basic blocks to determine if the update
-/// needs to be queued.
-/// Example (good):
-/// TerminatorInstructionBB->removeFromParent();
-/// DDT->deleteEdge(BB, Successor);
-/// Example (bad):
-/// DDT->deleteEdge(BB, Successor);
-/// TerminatorInstructionBB->removeFromParent();
-class DeferredDominance {
-public:
- DeferredDominance(DominatorTree &DT_) : DT(DT_) {}
-
- /// Queues multiple updates and discards duplicates.
- void applyUpdates(ArrayRef<DominatorTree::UpdateType> Updates);
-
- /// Helper method for a single edge insertion. It's almost always
- /// better to batch updates and call applyUpdates to quickly remove duplicate
- /// edges. This is best used when there is only a single insertion needed to
- /// update Dominators.
- void insertEdge(BasicBlock *From, BasicBlock *To);
-
- /// Helper method for a single edge deletion. It's almost always better
- /// to batch updates and call applyUpdates to quickly remove duplicate edges.
- /// This is best used when there is only a single deletion needed to update
- /// Dominators.
- void deleteEdge(BasicBlock *From, BasicBlock *To);
-
- /// Delays the deletion of a basic block until a flush() event.
- void deleteBB(BasicBlock *DelBB);
-
- /// Returns true if DelBB is awaiting deletion at a flush() event.
- bool pendingDeletedBB(BasicBlock *DelBB);
-
- /// Returns true if pending DT updates are queued for a flush() event.
- bool pending();
-
- /// Flushes all pending updates and block deletions. Returns a
- /// correct DominatorTree reference to be used by the caller for analysis.
- DominatorTree &flush();
-
- /// Drops all internal state and forces a (slow) recalculation of the
- /// DominatorTree based on the current state of the LLVM IR in F. This should
- /// only be used in corner cases such as the Entry block of F being deleted.
- void recalculate(Function &F);
-
- /// Debug method to help view the state of pending updates.
- LLVM_DUMP_METHOD void dump() const;
-
-private:
- DominatorTree &DT;
- SmallVector<DominatorTree::UpdateType, 16> PendUpdates;
- SmallPtrSet<BasicBlock *, 8> DeletedBBs;
-
- /// Apply an update (Kind, From, To) to the internal queued updates. The
- /// update is only added when determined to be necessary. Checks for
- /// self-domination, unnecessary updates, duplicate requests, and balanced
- /// pairs of requests are all performed. Returns true if the update is
- /// queued and false if it is discarded.
- bool applyUpdate(DominatorTree::UpdateKind Kind, BasicBlock *From,
- BasicBlock *To);
-
- /// Performs all pending basic block deletions. We have to defer the deletion
- /// of these blocks until after the DominatorTree updates are applied. The
- /// internal workings of the DominatorTree code expect every update's From
- /// and To blocks to exist and to be a member of the same Function.
- bool flushDelBB();
-};
-
} // end namespace llvm
#endif // LLVM_IR_DOMINATORS_H
diff --git a/contrib/llvm/include/llvm/IR/Function.h b/contrib/llvm/include/llvm/IR/Function.h
index 02e3ecc8e27f..630f47e8bb57 100644
--- a/contrib/llvm/include/llvm/IR/Function.h
+++ b/contrib/llvm/include/llvm/IR/Function.h
@@ -120,7 +120,7 @@ private:
/// function is automatically inserted into the end of the function list for
/// the module.
///
- Function(FunctionType *Ty, LinkageTypes Linkage,
+ Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
const Twine &N = "", Module *M = nullptr);
public:
@@ -134,17 +134,31 @@ public:
const Function &getFunction() const { return *this; }
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
+ unsigned AddrSpace, const Twine &N = "",
+ Module *M = nullptr) {
+ return new Function(Ty, Linkage, AddrSpace, N, M);
+ }
+
+ // TODO: remove this once all users have been updated to pass an AddrSpace
+ static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = nullptr) {
- return new Function(Ty, Linkage, N, M);
+ return new Function(Ty, Linkage, static_cast<unsigned>(-1), N, M);
}
+ /// Creates a new function and attaches it to a module.
+ ///
+ /// Places the function in the program address space as specified
+ /// by the module's data layout.
+ static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
+ const Twine &N, Module &M);
+
// Provide fast operand accessors.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Returns the number of non-debug IR instructions in this function.
/// This is equivalent to the sum of the sizes of each basic block contained
/// within this function.
- unsigned getInstructionCount();
+ unsigned getInstructionCount() const;
/// Returns the FunctionType for me.
FunctionType *getFunctionType() const {
diff --git a/contrib/llvm/include/llvm/IR/GlobalValue.h b/contrib/llvm/include/llvm/IR/GlobalValue.h
index 9d9f4f65a6b5..c07d4051c803 100644
--- a/contrib/llvm/include/llvm/IR/GlobalValue.h
+++ b/contrib/llvm/include/llvm/IR/GlobalValue.h
@@ -189,6 +189,7 @@ public:
GlobalValue(const GlobalValue &) = delete;
unsigned getAlignment() const;
+ unsigned getAddressSpace() const;
enum class UnnamedAddr {
None,
diff --git a/contrib/llvm/include/llvm/IR/IRBuilder.h b/contrib/llvm/include/llvm/IR/IRBuilder.h
index 70641ba25d2e..fac2ff46c453 100644
--- a/contrib/llvm/include/llvm/IR/IRBuilder.h
+++ b/contrib/llvm/include/llvm/IR/IRBuilder.h
@@ -651,7 +651,7 @@ public:
ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
const Twine &Name = "");
- // Conveninence function for the common case when CallArgs are filled in using
+ // Convenience function for the common case when CallArgs are filled in using
// makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
// get the Value *.
InvokeInst *
@@ -675,32 +675,44 @@ public:
Type *ResultType,
const Twine &Name = "");
+ /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
+ /// type.
+ CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
+ Instruction *FMFSource = nullptr,
+ const Twine &Name = "");
+
/// Create a call to intrinsic \p ID with 2 operands which is mangled on the
/// first type.
- CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID,
- Value *LHS, Value *RHS,
+ CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
+ Instruction *FMFSource = nullptr,
const Twine &Name = "");
- /// Create a call to intrinsic \p ID with no operands.
- CallInst *CreateIntrinsic(Intrinsic::ID ID,
- Instruction *FMFSource = nullptr,
- const Twine &Name = "");
-
- /// Create a call to intrinsic \p ID with 1 or more operands assuming the
- /// intrinsic and all operands have the same type. If \p FMFSource is
- /// provided, copy fast-math-flags from that instruction to the intrinsic.
- CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Value *> Args,
+ /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
+ /// \p FMFSource is provided, copy fast-math-flags from that instruction to
+ /// the intrinsic.
+ CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
+ ArrayRef<Value *> Args,
Instruction *FMFSource = nullptr,
const Twine &Name = "");
/// Create call to the minnum intrinsic.
CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
- return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, Name);
+ return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
}
/// Create call to the maxnum intrinsic.
CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
- return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, Name);
+ return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
+ }
+
+ /// Create call to the minimum intrinsic.
+ CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
+ }
+
+ /// Create call to the maximum intrinsic.
+ CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
}
private:
@@ -877,19 +889,59 @@ public:
}
/// Create an invoke instruction.
- InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
- BasicBlock *UnwindDest,
+ InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name = "") {
+ return Insert(
+ InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles),
+ Name);
+ }
+ InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest,
ArrayRef<Value *> Args = None,
const Twine &Name = "") {
- return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
+ return Insert(InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args),
Name);
}
+
+ InvokeInst *CreateInvoke(Function *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name = "") {
+ return CreateInvoke(Callee->getFunctionType(), Callee, NormalDest,
+ UnwindDest, Args, OpBundles, Name);
+ }
+
+ InvokeInst *CreateInvoke(Function *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest,
+ ArrayRef<Value *> Args = None,
+ const Twine &Name = "") {
+ return CreateInvoke(Callee->getFunctionType(), Callee, NormalDest,
+ UnwindDest, Args, Name);
+ }
+
+ // Deprecated [opaque pointer types]
InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
BasicBlock *UnwindDest, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> OpBundles,
const Twine &Name = "") {
- return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
- OpBundles), Name);
+ return CreateInvoke(
+ cast<FunctionType>(
+ cast<PointerType>(Callee->getType())->getElementType()),
+ Callee, NormalDest, UnwindDest, Args, OpBundles, Name);
+ }
+
+ // Deprecated [opaque pointer types]
+ InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest,
+ ArrayRef<Value *> Args = None,
+ const Twine &Name = "") {
+ return CreateInvoke(
+ cast<FunctionType>(
+ cast<PointerType>(Callee->getType())->getElementType()),
+ Callee, NormalDest, UnwindDest, Args, Name);
}
ResumeInst *CreateResume(Value *Exn) {
@@ -1300,22 +1352,35 @@ public:
return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
}
- /// Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
+ /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
/// converting the string to 'bool' for the isVolatile parameter.
- LoadInst *CreateLoad(Value *Ptr, const char *Name) {
- return Insert(new LoadInst(Ptr), Name);
- }
-
- LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
- return Insert(new LoadInst(Ptr), Name);
+ LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
+ return Insert(new LoadInst(Ty, Ptr), Name);
}
LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
return Insert(new LoadInst(Ty, Ptr), Name);
}
+ LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
+ const Twine &Name = "") {
+ return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name);
+ }
+
+ // Deprecated [opaque pointer types]
+ LoadInst *CreateLoad(Value *Ptr, const char *Name) {
+ return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
+ }
+
+ // Deprecated [opaque pointer types]
+ LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
+ return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
+ }
+
+ // Deprecated [opaque pointer types]
LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
- return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
+ return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
+ Name);
}
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
@@ -1325,24 +1390,43 @@ public:
/// Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
/// correctly, instead of converting the string to 'bool' for the isVolatile
/// parameter.
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
- LoadInst *LI = CreateLoad(Ptr, Name);
+ LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
+ const char *Name) {
+ LoadInst *LI = CreateLoad(Ty, Ptr, Name);
LI->setAlignment(Align);
return LI;
}
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+ LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
const Twine &Name = "") {
- LoadInst *LI = CreateLoad(Ptr, Name);
+ LoadInst *LI = CreateLoad(Ty, Ptr, Name);
LI->setAlignment(Align);
return LI;
}
- LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
- const Twine &Name = "") {
- LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
+ LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
+ bool isVolatile, const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
LI->setAlignment(Align);
return LI;
}
+ // Deprecated [opaque pointer types]
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+ return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+ Align, Name);
+ }
+ // Deprecated [opaque pointer types]
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+ const Twine &Name = "") {
+ return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+ Align, Name);
+ }
+ // Deprecated [opaque pointer types]
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
+ const Twine &Name = "") {
+ return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
+ Align, isVolatile, Name);
+ }
+
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
bool isVolatile = false) {
StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
@@ -1479,50 +1563,69 @@ public:
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
}
- Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+ Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
+ const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
- return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
}
- Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
+ Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+ return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
+ }
+
+ Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
- return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
}
- Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
- const Twine &Name = "") {
+ Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
+ const Twine &Name = "") {
+ return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
+ }
+
+ Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+ const Twine &Name = "") {
Value *Idxs[] = {
ConstantInt::get(Type::getInt64Ty(Context), Idx0),
ConstantInt::get(Type::getInt64Ty(Context), Idx1)
};
if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
- return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
}
- Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
- const Twine &Name = "") {
+ Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+ const Twine &Name = "") {
+ return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
+ }
+
+ Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
+ uint64_t Idx1, const Twine &Name = "") {
Value *Idxs[] = {
ConstantInt::get(Type::getInt64Ty(Context), Idx0),
ConstantInt::get(Type::getInt64Ty(Context), Idx1)
};
if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
- Name);
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
- return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
+ }
+
+ Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+ const Twine &Name = "") {
+ return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
}
Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
@@ -1868,15 +1971,8 @@ public:
return Insert(PHINode::Create(Ty, NumReservedValues), Name);
}
- CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
- const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- auto *PTy = cast<PointerType>(Callee->getType());
- auto *FTy = cast<FunctionType>(PTy->getElementType());
- return CreateCall(FTy, Callee, Args, Name, FPMathTag);
- }
-
CallInst *CreateCall(FunctionType *FTy, Value *Callee,
- ArrayRef<Value *> Args, const Twine &Name = "",
+ ArrayRef<Value *> Args = None, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
if (isa<FPMathOperator>(CI))
@@ -1884,20 +1980,44 @@ public:
return Insert(CI, Name);
}
- CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+ CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> OpBundles,
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- CallInst *CI = CallInst::Create(Callee, Args, OpBundles);
+ CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
if (isa<FPMathOperator>(CI))
CI = cast<CallInst>(setFPAttrs(CI, FPMathTag, FMF));
return Insert(CI, Name);
}
- CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+ CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args = None,
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
return CreateCall(Callee->getFunctionType(), Callee, Args, Name, FPMathTag);
}
+ CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ return CreateCall(Callee->getFunctionType(), Callee, Args, OpBundles, Name,
+ FPMathTag);
+ }
+
+ // Deprecated [opaque pointer types]
+ CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ return CreateCall(
+ cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
+ Args, Name, FPMathTag);
+ }
+
+ // Deprecated [opaque pointer types]
+ CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ return CreateCall(
+ cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
+ Args, OpBundles, Name, FPMathTag);
+ }
+
Value *CreateSelect(Value *C, Value *True, Value *False,
const Twine &Name = "", Instruction *MDFrom = nullptr) {
if (auto *CC = dyn_cast<Constant>(C))
@@ -2114,11 +2234,12 @@ public:
private:
/// Helper function that creates an assume intrinsic call that
/// represents an alignment assumption on the provided Ptr, Mask, Type
- /// and Offset.
+ /// and Offset. It may be sometimes useful to do some other logic
+ /// based on this alignment check, thus it can be stored into 'TheCheck'.
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
Value *PtrValue, Value *Mask,
- Type *IntPtrTy,
- Value *OffsetValue) {
+ Type *IntPtrTy, Value *OffsetValue,
+ Value **TheCheck) {
Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
if (OffsetValue) {
@@ -2137,6 +2258,9 @@ private:
Value *Zero = ConstantInt::get(IntPtrTy, 0);
Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+ if (TheCheck)
+ *TheCheck = InvCond;
+
return CreateAssumption(InvCond);
}
@@ -2147,9 +2271,13 @@ public:
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
+ ///
+ /// It may be sometimes useful to do some other logic
+ /// based on this alignment check, thus it can be stored into 'TheCheck'.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
unsigned Alignment,
- Value *OffsetValue = nullptr) {
+ Value *OffsetValue = nullptr,
+ Value **TheCheck = nullptr) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
@@ -2157,7 +2285,7 @@ public:
Value *Mask = ConstantInt::get(IntPtrTy, Alignment > 0 ? Alignment - 1 : 0);
return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
- OffsetValue);
+ OffsetValue, TheCheck);
}
/// Create an assume intrinsic call that represents an alignment
@@ -2167,11 +2295,15 @@ public:
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
+ /// It may be sometimes useful to do some other logic
+ /// based on this alignment check, thus it can be stored into 'TheCheck'.
+ ///
/// This overload handles the condition where the Alignment is dependent
/// on an existing value rather than a static value.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
Value *Alignment,
- Value *OffsetValue = nullptr) {
+ Value *OffsetValue = nullptr,
+ Value **TheCheck = nullptr) {
assert(isa<PointerType>(PtrValue->getType()) &&
"trying to create an alignment assumption on a non-pointer?");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
@@ -2189,7 +2321,7 @@ public:
ConstantInt::get(IntPtrTy, 0), "mask");
return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
- OffsetValue);
+ OffsetValue, TheCheck);
}
};
diff --git a/contrib/llvm/include/llvm/IR/IRPrintingPasses.h b/contrib/llvm/include/llvm/IR/IRPrintingPasses.h
index e4ac5d4d88a3..75f80567dbd5 100644
--- a/contrib/llvm/include/llvm/IR/IRPrintingPasses.h
+++ b/contrib/llvm/include/llvm/IR/IRPrintingPasses.h
@@ -58,6 +58,22 @@ void printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name);
/// Return true if a pass is for IR printing.
bool isIRPrintingPass(Pass *P);
+/// isFunctionInPrintList - returns true if a function should be printed via
+// debugging options like -print-after-all/-print-before-all.
+// Tells if the function IR should be printed by PrinterPass.
+extern bool isFunctionInPrintList(StringRef FunctionName);
+
+/// forcePrintModuleIR - returns true if IR printing passes should
+// be printing module IR (even for local-pass printers e.g. function-pass)
+// to provide more context, as enabled by debugging option -print-module-scope
+// Tells if IR printer should be printing module IR
+extern bool forcePrintModuleIR();
+
+extern bool shouldPrintBeforePass();
+extern bool shouldPrintBeforePass(StringRef);
+extern bool shouldPrintAfterPass();
+extern bool shouldPrintAfterPass(StringRef);
+
/// Pass for printing a Module as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
diff --git a/contrib/llvm/include/llvm/IR/InstVisitor.h b/contrib/llvm/include/llvm/IR/InstVisitor.h
index 65074025a083..c5b4c6f71d7d 100644
--- a/contrib/llvm/include/llvm/IR/InstVisitor.h
+++ b/contrib/llvm/include/llvm/IR/InstVisitor.h
@@ -166,15 +166,6 @@ public:
// Specific Instruction type classes... note that all of the casts are
// necessary because we use the instruction classes as opaque types...
//
- RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitCleanupReturnInst(CleanupReturnInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitCatchReturnInst(CatchReturnInst &I) { DELEGATE(TerminatorInst); }
- RetTy visitCatchSwitchInst(CatchSwitchInst &I) { DELEGATE(TerminatorInst);}
RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(UnaryInstruction);}
@@ -211,10 +202,12 @@ public:
RetTy visitCatchPadInst(CatchPadInst &I) { DELEGATE(FuncletPadInst); }
// Handle the special instrinsic instruction classes.
- RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgInfoIntrinsic);}
- RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgVariableIntrinsic);}
+ RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgVariableIntrinsic);}
+ RetTy visitDbgVariableIntrinsic(DbgVariableIntrinsic &I)
+ { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
- RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
@@ -234,27 +227,64 @@ public:
return static_cast<SubClass*>(this)->visitCallSite(&I);
}
+ // While terminators don't have a distinct type modeling them, we support
+ // intercepting them with dedicated a visitor callback.
+ RetTy visitReturnInst(ReturnInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitBranchInst(BranchInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitSwitchInst(SwitchInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitIndirectBrInst(IndirectBrInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitResumeInst(ResumeInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitUnreachableInst(UnreachableInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitCleanupReturnInst(CleanupReturnInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitCatchReturnInst(CatchReturnInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitCatchSwitchInst(CatchSwitchInst &I) {
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+ }
+ RetTy visitTerminator(Instruction &I) { DELEGATE(Instruction);}
+
// Next level propagators: If the user does not overload a specific
// instruction type, they can overload one of these to get the whole class
// of instructions...
//
RetTy visitCastInst(CastInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitUnaryOperator(UnaryOperator &I) { DELEGATE(UnaryInstruction);}
RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction);}
RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction);}
- RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction);}
RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
- // Provide a special visitor for a 'callsite' that visits both calls and
- // invokes. When unimplemented, properly delegates to either the terminator or
- // regular instruction visitor.
+ // The next level delegation for `CallBase` is slightly more complex in order
+ // to support visiting cases where the call is also a terminator.
+ RetTy visitCallBase(CallBase &I) {
+ if (isa<InvokeInst>(I))
+ return static_cast<SubClass *>(this)->visitTerminator(I);
+
+ DELEGATE(Instruction);
+ }
+
+ // Provide a legacy visitor for a 'callsite' that visits both calls and
+ // invokes.
+ //
+ // Prefer overriding the type system based `CallBase` instead.
RetTy visitCallSite(CallSite CS) {
assert(CS);
Instruction &I = *CS.getInstruction();
- if (CS.isCall())
- DELEGATE(Instruction);
-
- assert(CS.isInvoke());
- DELEGATE(TerminatorInst);
+ DELEGATE(CallBase);
}
// If the user wants a 'default' case, they can choose to override this
diff --git a/contrib/llvm/include/llvm/IR/InstrTypes.h b/contrib/llvm/include/llvm/IR/InstrTypes.h
index ad0012048ac9..3f384a6ee40c 100644
--- a/contrib/llvm/include/llvm/IR/InstrTypes.h
+++ b/contrib/llvm/include/llvm/IR/InstrTypes.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
@@ -45,233 +46,9 @@
namespace llvm {
-//===----------------------------------------------------------------------===//
-// TerminatorInst Class
-//===----------------------------------------------------------------------===//
-
-/// Subclasses of this class are all able to terminate a basic
-/// block. Thus, these are all the flow control type of operations.
-///
-class TerminatorInst : public Instruction {
-protected:
- TerminatorInst(Type *Ty, Instruction::TermOps iType,
- Use *Ops, unsigned NumOps,
- Instruction *InsertBefore = nullptr)
- : Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}
-
- TerminatorInst(Type *Ty, Instruction::TermOps iType,
- Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
- : Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}
-
-public:
- /// Return the number of successors that this terminator has.
- unsigned getNumSuccessors() const;
-
- /// Return the specified successor.
- BasicBlock *getSuccessor(unsigned idx) const;
-
- /// Update the specified successor to point at the provided block.
- void setSuccessor(unsigned idx, BasicBlock *B);
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const Instruction *I) {
- return I->isTerminator();
- }
- static bool classof(const Value *V) {
- return isa<Instruction>(V) && classof(cast<Instruction>(V));
- }
-
- // Returns true if this terminator relates to exception handling.
- bool isExceptional() const {
- switch (getOpcode()) {
- case Instruction::CatchSwitch:
- case Instruction::CatchRet:
- case Instruction::CleanupRet:
- case Instruction::Invoke:
- case Instruction::Resume:
- return true;
- default:
- return false;
- }
- }
-
- //===--------------------------------------------------------------------===//
- // succ_iterator definition
- //===--------------------------------------------------------------------===//
-
- template <class Term, class BB> // Successor Iterator
- class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
- int, BB *, BB *> {
- using super =
- std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>;
-
- public:
- using pointer = typename super::pointer;
- using reference = typename super::reference;
-
- private:
- Term TermInst;
- unsigned idx;
- using Self = SuccIterator<Term, BB>;
-
- inline bool index_is_valid(unsigned idx) {
- return idx < TermInst->getNumSuccessors();
- }
-
- /// Proxy object to allow write access in operator[]
- class SuccessorProxy {
- Self it;
-
- public:
- explicit SuccessorProxy(const Self &it) : it(it) {}
-
- SuccessorProxy(const SuccessorProxy &) = default;
-
- SuccessorProxy &operator=(SuccessorProxy r) {
- *this = reference(r);
- return *this;
- }
-
- SuccessorProxy &operator=(reference r) {
- it.TermInst->setSuccessor(it.idx, r);
- return *this;
- }
-
- operator reference() const { return *it; }
- };
-
- public:
- // begin iterator
- explicit inline SuccIterator(Term T) : TermInst(T), idx(0) {}
- // end iterator
- inline SuccIterator(Term T, bool) : TermInst(T) {
- if (TermInst)
- idx = TermInst->getNumSuccessors();
- else
- // Term == NULL happens, if a basic block is not fully constructed and
- // consequently getTerminator() returns NULL. In this case we construct
- // a SuccIterator which describes a basic block that has zero
- // successors.
- // Defining SuccIterator for incomplete and malformed CFGs is especially
- // useful for debugging.
- idx = 0;
- }
-
- /// This is used to interface between code that wants to
- /// operate on terminator instructions directly.
- unsigned getSuccessorIndex() const { return idx; }
-
- inline bool operator==(const Self &x) const { return idx == x.idx; }
- inline bool operator!=(const Self &x) const { return !operator==(x); }
-
- inline reference operator*() const { return TermInst->getSuccessor(idx); }
- inline pointer operator->() const { return operator*(); }
-
- inline Self &operator++() {
- ++idx;
- return *this;
- } // Preincrement
-
- inline Self operator++(int) { // Postincrement
- Self tmp = *this;
- ++*this;
- return tmp;
- }
-
- inline Self &operator--() {
- --idx;
- return *this;
- } // Predecrement
- inline Self operator--(int) { // Postdecrement
- Self tmp = *this;
- --*this;
- return tmp;
- }
-
- inline bool operator<(const Self &x) const {
- assert(TermInst == x.TermInst &&
- "Cannot compare iterators of different blocks!");
- return idx < x.idx;
- }
-
- inline bool operator<=(const Self &x) const {
- assert(TermInst == x.TermInst &&
- "Cannot compare iterators of different blocks!");
- return idx <= x.idx;
- }
- inline bool operator>=(const Self &x) const {
- assert(TermInst == x.TermInst &&
- "Cannot compare iterators of different blocks!");
- return idx >= x.idx;
- }
-
- inline bool operator>(const Self &x) const {
- assert(TermInst == x.TermInst &&
- "Cannot compare iterators of different blocks!");
- return idx > x.idx;
- }
-
- inline Self &operator+=(int Right) {
- unsigned new_idx = idx + Right;
- assert(index_is_valid(new_idx) && "Iterator index out of bound");
- idx = new_idx;
- return *this;
- }
-
- inline Self operator+(int Right) const {
- Self tmp = *this;
- tmp += Right;
- return tmp;
- }
-
- inline Self &operator-=(int Right) { return operator+=(-Right); }
-
- inline Self operator-(int Right) const { return operator+(-Right); }
-
- inline int operator-(const Self &x) const {
- assert(TermInst == x.TermInst &&
- "Cannot work on iterators of different blocks!");
- int distance = idx - x.idx;
- return distance;
- }
-
- inline SuccessorProxy operator[](int offset) {
- Self tmp = *this;
- tmp += offset;
- return SuccessorProxy(tmp);
- }
-
- /// Get the source BB of this iterator.
- inline BB *getSource() {
- assert(TermInst && "Source not available, if basic block was malformed");
- return TermInst->getParent();
- }
- };
-
- using succ_iterator = SuccIterator<TerminatorInst *, BasicBlock>;
- using succ_const_iterator =
- SuccIterator<const TerminatorInst *, const BasicBlock>;
- using succ_range = iterator_range<succ_iterator>;
- using succ_const_range = iterator_range<succ_const_iterator>;
-
-private:
- inline succ_iterator succ_begin() { return succ_iterator(this); }
- inline succ_const_iterator succ_begin() const {
- return succ_const_iterator(this);
- }
- inline succ_iterator succ_end() { return succ_iterator(this, true); }
- inline succ_const_iterator succ_end() const {
- return succ_const_iterator(this, true);
- }
-
-public:
- inline succ_range successors() {
- return succ_range(succ_begin(), succ_end());
- }
- inline succ_const_range successors() const {
- return succ_const_range(succ_begin(), succ_end());
- }
-};
+namespace Intrinsic {
+enum ID : unsigned;
+}
//===----------------------------------------------------------------------===//
// UnaryInstruction Class
@@ -536,22 +313,6 @@ public:
static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
- /// Check if the given Value is a NEG, FNeg, or NOT instruction.
- ///
- static bool isNeg(const Value *V);
- static bool isFNeg(const Value *V, bool IgnoreZeroSign=false);
- static bool isNot(const Value *V);
-
- /// Helper functions to extract the unary argument of a NEG, FNEG or NOT
- /// operation implemented via Sub, FSub, or Xor.
- ///
- static const Value *getNegArgument(const Value *BinOp);
- static Value *getNegArgument( Value *BinOp);
- static const Value *getFNegArgument(const Value *BinOp);
- static Value *getFNegArgument( Value *BinOp);
- static const Value *getNotArgument(const Value *BinOp);
- static Value *getNotArgument( Value *BinOp);
-
BinaryOps getOpcode() const {
return static_cast<BinaryOps>(Instruction::getOpcode());
}
@@ -921,7 +682,8 @@ public:
protected:
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
Value *LHS, Value *RHS, const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
+ Instruction *InsertBefore = nullptr,
+ Instruction *FlagsSource = nullptr);
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
Value *LHS, Value *RHS, const Twine &Name,
@@ -1147,76 +909,6 @@ struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
-//===----------------------------------------------------------------------===//
-// FuncletPadInst Class
-//===----------------------------------------------------------------------===//
-class FuncletPadInst : public Instruction {
-private:
- FuncletPadInst(const FuncletPadInst &CPI);
-
- explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
- ArrayRef<Value *> Args, unsigned Values,
- const Twine &NameStr, Instruction *InsertBefore);
- explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
- ArrayRef<Value *> Args, unsigned Values,
- const Twine &NameStr, BasicBlock *InsertAtEnd);
-
- void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
-
-protected:
- // Note: Instruction needs to be a friend here to call cloneImpl.
- friend class Instruction;
- friend class CatchPadInst;
- friend class CleanupPadInst;
-
- FuncletPadInst *cloneImpl() const;
-
-public:
- /// Provide fast operand accessors
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
-
- /// getNumArgOperands - Return the number of funcletpad arguments.
- ///
- unsigned getNumArgOperands() const { return getNumOperands() - 1; }
-
- /// Convenience accessors
-
- /// Return the outer EH-pad this funclet is nested within.
- ///
- /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
- /// is a CatchPadInst.
- Value *getParentPad() const { return Op<-1>(); }
- void setParentPad(Value *ParentPad) {
- assert(ParentPad);
- Op<-1>() = ParentPad;
- }
-
- /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
- ///
- Value *getArgOperand(unsigned i) const { return getOperand(i); }
- void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
-
- /// arg_operands - iteration adapter for range-for loops.
- op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
-
- /// arg_operands - iteration adapter for range-for loops.
- const_op_range arg_operands() const {
- return const_op_range(op_begin(), op_end() - 1);
- }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const Instruction *I) { return I->isFuncletPad(); }
- static bool classof(const Value *V) {
- return isa<Instruction>(V) && classof(cast<Instruction>(V));
- }
-};
-
-template <>
-struct OperandTraits<FuncletPadInst>
- : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
-
-DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
-
/// A lightweight accessor for an operand bundle meant to be passed
/// around by value.
struct OperandBundleUse {
@@ -1301,54 +993,609 @@ public:
using OperandBundleDef = OperandBundleDefT<Value *>;
using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
-/// A mixin to add operand bundle functionality to llvm instruction
-/// classes.
-///
-/// OperandBundleUser uses the descriptor area co-allocated with the host User
-/// to store some meta information about which operands are "normal" operands,
-/// and which ones belong to some operand bundle.
-///
-/// The layout of an operand bundle user is
-///
-/// +-----------uint32_t End-------------------------------------+
-/// | |
-/// | +--------uint32_t Begin--------------------+ |
-/// | | | |
-/// ^ ^ v v
-/// |------|------|----|----|----|----|----|---------|----|---------|----|-----
-/// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
-/// |------|------|----|----|----|----|----|---------|----|---------|----|-----
-/// v v ^ ^
-/// | | | |
-/// | +--------uint32_t Begin------------+ |
-/// | |
-/// +-----------uint32_t End-----------------------------+
-///
-///
-/// BOI0, BOI1 ... are descriptions of operand bundles in this User's use list.
-/// These descriptions are installed and managed by this class, and they're all
-/// instances of OperandBundleUser<T>::BundleOpInfo.
-///
-/// DU is an additional descriptor installed by User's 'operator new' to keep
-/// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
-/// access or modify DU in any way, it's an implementation detail private to
-/// User.
-///
-/// The regular Use& vector for the User starts at U0. The operand bundle uses
-/// are part of the Use& vector, just like normal uses. In the diagram above,
-/// the operand bundle uses start at BOI0_U0. Each instance of BundleOpInfo has
-/// information about a contiguous set of uses constituting an operand bundle,
-/// and the total set of operand bundle uses themselves form a contiguous set of
-/// uses (i.e. there are no gaps between uses corresponding to individual
-/// operand bundles).
+//===----------------------------------------------------------------------===//
+// CallBase Class
+//===----------------------------------------------------------------------===//
+
+/// Base class for all callable instructions (InvokeInst and CallInst)
+/// Holds everything related to calling a function.
///
-/// This class does not know the location of the set of operand bundle uses
-/// within the use list -- that is decided by the User using this class via the
-/// BeginIdx argument in populateBundleOperandInfos.
+/// All call-like instructions are required to use a common operand layout:
+/// - Zero or more arguments to the call,
+/// - Zero or more operand bundles with zero or more operand inputs each
+/// bundle,
+/// - Zero or more subclass controlled operands
+/// - The called function.
///
-/// Currently operand bundle users with hung-off operands are not supported.
-template <typename InstrTy, typename OpIteratorTy> class OperandBundleUser {
+/// This allows this base class to easily access the called function and the
+/// start of the arguments without knowing how many other operands a particular
+/// subclass requires. Note that accessing the end of the argument list isn't
+/// as cheap as most other operations on the base class.
+class CallBase : public Instruction {
+protected:
+ /// The last operand is the called operand.
+ static constexpr int CalledOperandOpEndIdx = -1;
+
+ AttributeList Attrs; ///< parameter attributes for callable
+ FunctionType *FTy;
+
+ template <class... ArgsTy>
+ CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
+ : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
+
+ using Instruction::Instruction;
+
+ bool hasDescriptor() const { return Value::HasDescriptor; }
+
+ unsigned getNumSubclassExtraOperands() const {
+ switch (getOpcode()) {
+ case Instruction::Call:
+ return 0;
+ case Instruction::Invoke:
+ return 2;
+ }
+ llvm_unreachable("Invalid opcode!");
+ }
+
public:
+ using Instruction::getContext;
+
+ static bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Call ||
+ I->getOpcode() == Instruction::Invoke;
+ }
+ static bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+ FunctionType *getFunctionType() const { return FTy; }
+
+ void mutateFunctionType(FunctionType *FTy) {
+ Value::mutateType(FTy->getReturnType());
+ this->FTy = FTy;
+ }
+
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// data_operands_begin/data_operands_end - Return iterators iterating over
+ /// the call / invoke argument list and bundle operands. For invokes, this is
+ /// the set of instruction operands except the invoke target and the two
+ /// successor blocks; and for calls this is the set of instruction operands
+ /// except the call target.
+ User::op_iterator data_operands_begin() { return op_begin(); }
+ User::const_op_iterator data_operands_begin() const {
+ return const_cast<CallBase *>(this)->data_operands_begin();
+ }
+ User::op_iterator data_operands_end() {
+ // Walk from the end of the operands over the called operand and any
+ // subclass operands.
+ return op_end() - getNumSubclassExtraOperands() - 1;
+ }
+ User::const_op_iterator data_operands_end() const {
+ return const_cast<CallBase *>(this)->data_operands_end();
+ }
+ iterator_range<User::op_iterator> data_ops() {
+ return make_range(data_operands_begin(), data_operands_end());
+ }
+ iterator_range<User::const_op_iterator> data_ops() const {
+ return make_range(data_operands_begin(), data_operands_end());
+ }
+ bool data_operands_empty() const {
+ return data_operands_end() == data_operands_begin();
+ }
+ unsigned data_operands_size() const {
+ return std::distance(data_operands_begin(), data_operands_end());
+ }
+
+ bool isDataOperand(const Use *U) const {
+ assert(this == U->getUser() &&
+ "Only valid to query with a use of this instruction!");
+ return data_operands_begin() <= U && U < data_operands_end();
+ }
+ bool isDataOperand(Value::const_user_iterator UI) const {
+ return isDataOperand(&UI.getUse());
+ }
+
+ /// Return the iterator pointing to the beginning of the argument list.
+ User::op_iterator arg_begin() { return op_begin(); }
+ User::const_op_iterator arg_begin() const {
+ return const_cast<CallBase *>(this)->arg_begin();
+ }
+
+ /// Return the iterator pointing to the end of the argument list.
+ User::op_iterator arg_end() {
+ // From the end of the data operands, walk backwards past the bundle
+ // operands.
+ return data_operands_end() - getNumTotalBundleOperands();
+ }
+ User::const_op_iterator arg_end() const {
+ return const_cast<CallBase *>(this)->arg_end();
+ }
+
+ /// Iteration adapter for range-for loops.
+ iterator_range<User::op_iterator> args() {
+ return make_range(arg_begin(), arg_end());
+ }
+ iterator_range<User::const_op_iterator> args() const {
+ return make_range(arg_begin(), arg_end());
+ }
+ bool arg_empty() const { return arg_end() == arg_begin(); }
+ unsigned arg_size() const { return arg_end() - arg_begin(); }
+
+ // Legacy API names that duplicate the above and will be removed once users
+ // are migrated.
+ iterator_range<User::op_iterator> arg_operands() {
+ return make_range(arg_begin(), arg_end());
+ }
+ iterator_range<User::const_op_iterator> arg_operands() const {
+ return make_range(arg_begin(), arg_end());
+ }
+ unsigned getNumArgOperands() const { return arg_size(); }
+
+ Value *getArgOperand(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperand(i);
+ }
+
+ void setArgOperand(unsigned i, Value *v) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ setOperand(i, v);
+ }
+
+ /// Wrappers for getting the \c Use of a call argument.
+ const Use &getArgOperandUse(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return User::getOperandUse(i);
+ }
+ Use &getArgOperandUse(unsigned i) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return User::getOperandUse(i);
+ }
+
+ bool isArgOperand(const Use *U) const {
+ assert(this == U->getUser() &&
+ "Only valid to query with a use of this instruction!");
+ return arg_begin() <= U && U < arg_end();
+ }
+ bool isArgOperand(Value::const_user_iterator UI) const {
+ return isArgOperand(&UI.getUse());
+ }
+
+ /// Returns true if this CallSite passes the given Value* as an argument to
+ /// the called function.
+ bool hasArgument(const Value *V) const {
+ return llvm::any_of(args(), [V](const Value *Arg) { return Arg == V; });
+ }
+
+ Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
+
+ // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in
+ // the near future.
+ Value *getCalledValue() const { return getCalledOperand(); }
+
+ const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
+ Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
+
+ /// Returns the function called, or null if this is an
+ /// indirect function invocation.
+ Function *getCalledFunction() const {
+ return dyn_cast_or_null<Function>(getCalledOperand());
+ }
+
+ /// Return true if the callsite is an indirect call.
+ bool isIndirectCall() const;
+
+ /// Determine whether the passed iterator points to the callee operand's Use.
+ bool isCallee(Value::const_user_iterator UI) const {
+ return isCallee(&UI.getUse());
+ }
+
+ /// Determine whether this Use is the callee operand's Use.
+ bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
+
+ /// Helper to get the caller (the parent function).
+ Function *getCaller();
+ const Function *getCaller() const {
+ return const_cast<CallBase *>(this)->getCaller();
+ }
+
+ /// Returns the intrinsic ID of the intrinsic called or
+ /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
+ /// this is an indirect call.
+ Intrinsic::ID getIntrinsicID() const;
+
+ void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
+
+ /// Sets the function called, including updating the function type.
+ void setCalledFunction(Value *Fn) {
+ setCalledFunction(
+ cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
+ Fn);
+ }
+
+ /// Sets the function called, including updating to the specified function
+ /// type.
+ void setCalledFunction(FunctionType *FTy, Value *Fn) {
+ this->FTy = FTy;
+ assert(FTy == cast<FunctionType>(
+ cast<PointerType>(Fn->getType())->getElementType()));
+ setCalledOperand(Fn);
+ }
+
+ CallingConv::ID getCallingConv() const {
+ return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
+ }
+
+ void setCallingConv(CallingConv::ID CC) {
+ auto ID = static_cast<unsigned>(CC);
+ assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
+ (ID << 2));
+ }
+
+ /// \name Attribute API
+ ///
+ /// These methods access and modify attributes on this call (including
+ /// looking through to the attributes on the called function when necessary).
+ ///@{
+
+ /// Return the parameter attributes for this call.
+ ///
+ AttributeList getAttributes() const { return Attrs; }
+
+ /// Set the parameter attributes for this call.
+ ///
+ void setAttributes(AttributeList A) { Attrs = A; }
+
+ /// Determine whether this call has the given attribute.
+ bool hasFnAttr(Attribute::AttrKind Kind) const {
+ assert(Kind != Attribute::NoBuiltin &&
+ "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
+ return hasFnAttrImpl(Kind);
+ }
+
+ /// Determine whether this call has the given attribute.
+ bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
+
+ /// adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, Kind);
+ setAttributes(PAL);
+ }
+
+ /// adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, Attribute Attr) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, Attr);
+ setAttributes(PAL);
+ }
+
+ /// Adds the attribute to the indicated argument
+ void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+ }
+
+ /// Adds the attribute to the indicated argument
+ void addParamAttr(unsigned ArgNo, Attribute Attr) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
+ setAttributes(PAL);
+ }
+
+ /// removes the attribute from the list of attributes.
+ void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeAttribute(getContext(), i, Kind);
+ setAttributes(PAL);
+ }
+
+ /// removes the attribute from the list of attributes.
+ void removeAttribute(unsigned i, StringRef Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeAttribute(getContext(), i, Kind);
+ setAttributes(PAL);
+ }
+
+ /// Removes the attribute from the given argument
+ void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+ }
+
+ /// Removes the attribute from the given argument
+ void removeParamAttr(unsigned ArgNo, StringRef Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+ }
+
+ /// adds the dereferenceable attribute to the list of attributes.
+ void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+ }
+
+ /// adds the dereferenceable_or_null attribute to the list of
+ /// attributes.
+ void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+ }
+
+ /// Determine whether the return value has the given attribute.
+ bool hasRetAttr(Attribute::AttrKind Kind) const;
+
+ /// Determine whether the argument or parameter has the given attribute.
+ bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
+
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
+
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, StringRef Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
+
+ /// Get the attribute of a given kind from a given arg
+ Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ return getAttributes().getParamAttr(ArgNo, Kind);
+ }
+
+ /// Get the attribute of a given kind from a given arg
+ Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ return getAttributes().getParamAttr(ArgNo, Kind);
+ }
+
+ /// Return true if the data operand at index \p i has the attribute \p
+ /// A.
+ ///
+ /// Data operands include call arguments and values used in operand bundles,
+ /// but does not include the callee operand. This routine dispatches to the
+ /// underlying AttributeList or the OperandBundleUser as appropriate.
+ ///
+ /// The index \p i is interpreted as
+ ///
+ /// \p i == Attribute::ReturnIndex -> the return value
+ /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
+ /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
+ /// (\p i - 1) in the operand list.
+ bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
+ // Note that we have to add one because `i` isn't zero-indexed.
+ assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
+ "Data operand index out of bounds!");
+
+ // The attribute A can either be directly specified, if the operand in
+ // question is a call argument; or be indirectly implied by the kind of its
+ // containing operand bundle, if the operand is a bundle operand.
+
+ if (i == AttributeList::ReturnIndex)
+ return hasRetAttr(Kind);
+
+ // FIXME: Avoid these i - 1 calculations and update the API to use
+ // zero-based indices.
+ if (i < (getNumArgOperands() + 1))
+ return paramHasAttr(i - 1, Kind);
+
+ assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
+ "Must be either a call argument or an operand bundle!");
+ return bundleOperandHasAttr(i - 1, Kind);
+ }
+
+ /// Determine whether this data operand is not captured.
+ // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+ // better indicate that this may return a conservative answer.
+ bool doesNotCapture(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
+ }
+
+ /// Determine whether this argument is passed by value.
+ bool isByValArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo, Attribute::ByVal);
+ }
+
+ /// Determine whether this argument is passed in an alloca.
+ bool isInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo, Attribute::InAlloca);
+ }
+
+ /// Determine whether this argument is passed by value or in an alloca.
+ bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo, Attribute::ByVal) ||
+ paramHasAttr(ArgNo, Attribute::InAlloca);
+ }
+
+ /// Determine if there are is an inalloca argument. Only the last argument can
+ /// have the inalloca attribute.
+ bool hasInAllocaArgument() const {
+ return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
+ }
+
+ // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+ // better indicate that this may return a conservative answer.
+ bool doesNotAccessMemory(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+ }
+
+ // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+ // better indicate that this may return a conservative answer.
+ bool onlyReadsMemory(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
+ dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+ }
+
+ // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
+ // better indicate that this may return a conservative answer.
+ bool doesNotReadMemory(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
+ dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+ }
+
+ /// Extract the alignment of the return value.
+ unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
+
+ /// Extract the alignment for a call or parameter (0=unknown).
+ unsigned getParamAlignment(unsigned ArgNo) const {
+ return Attrs.getParamAlignment(ArgNo);
+ }
+
+ /// Extract the number of dereferenceable bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableBytes(unsigned i) const {
+ return Attrs.getDereferenceableBytes(i);
+ }
+
+ /// Extract the number of dereferenceable_or_null bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+ return Attrs.getDereferenceableOrNullBytes(i);
+ }
+
+ /// Return true if the return value is known to be not null.
+ /// This may be because it has the nonnull attribute, or because at least
+ /// one byte is dereferenceable and the pointer is in addrspace(0).
+ bool isReturnNonNull() const;
+
+ /// Determine if the return value is marked with NoAlias attribute.
+ bool returnDoesNotAlias() const {
+ return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+ }
+
+ /// If one of the arguments has the 'returned' attribute, returns its
+ /// operand value. Otherwise, return nullptr.
+ Value *getReturnedArgOperand() const;
+
+ /// Return true if the call should not be treated as a call to a
+ /// builtin.
+ bool isNoBuiltin() const {
+ return hasFnAttrImpl(Attribute::NoBuiltin) &&
+ !hasFnAttrImpl(Attribute::Builtin);
+ }
+
+ /// Determine if the call requires strict floating point semantics.
+ bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
+ /// Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
+ }
+ /// Determine if the call does not access memory.
+ bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
+ void setDoesNotAccessMemory() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
+ }
+
+ /// Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ }
+ void setOnlyReadsMemory() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
+ }
+
+ /// Determine if the call does not access or only writes memory.
+ bool doesNotReadMemory() const {
+ return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
+ }
+ void setDoesNotReadMemory() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
+ }
+
+ /// Determine if the call can access memmory only using pointers based
+ /// on its arguments.
+ bool onlyAccessesArgMemory() const {
+ return hasFnAttr(Attribute::ArgMemOnly);
+ }
+ void setOnlyAccessesArgMemory() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
+ }
+
+ /// Determine if the function may only access memory that is
+ /// inaccessible from the IR.
+ bool onlyAccessesInaccessibleMemory() const {
+ return hasFnAttr(Attribute::InaccessibleMemOnly);
+ }
+ void setOnlyAccessesInaccessibleMemory() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
+ }
+
+ /// Determine if the function may only access memory that is
+ /// either inaccessible from the IR or pointed to by its arguments.
+ bool onlyAccessesInaccessibleMemOrArgMem() const {
+ return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
+ }
+ void setOnlyAccessesInaccessibleMemOrArgMem() {
+ addAttribute(AttributeList::FunctionIndex,
+ Attribute::InaccessibleMemOrArgMemOnly);
+ }
+ /// Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
+ }
+
+ /// Determine if the call should not perform indirect branch tracking.
+ bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
+
+ /// Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
+ }
+
+ /// Determine if the invoke cannot be duplicated.
+ bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
+ void setCannotDuplicate() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
+ }
+
+ /// Determine if the invoke is convergent
+ bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
+ void setConvergent() {
+ addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
+ }
+ void setNotConvergent() {
+ removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
+ }
+
+ /// Determine if the call returns a structure through first
+ /// pointer argument.
+ bool hasStructRetAttr() const {
+ if (getNumArgOperands() == 0)
+ return false;
+
+ // Be friendly and also check the callee.
+ return paramHasAttr(0, Attribute::StructRet);
+ }
+
+ /// Determine if any call argument is an aggregate passed by value.
+ bool hasByValArgument() const {
+ return Attrs.hasAttrSomewhere(Attribute::ByVal);
+ }
+
+ ///@{
+ // End of attribute API.
+
+ /// \name Operand Bundle API
+ ///
+ /// This group of methods provides the API to access and manipulate operand
+ /// bundles on this call.
+ /// @{
+
/// Return the number of operand bundles associated with this User.
unsigned getNumOperandBundles() const {
return std::distance(bundle_op_info_begin(), bundle_op_info_end());
@@ -1375,6 +1622,16 @@ public:
Idx < getBundleOperandsEndIndex();
}
+ /// Returns true if the use is a bundle operand.
+ bool isBundleOperand(const Use *U) const {
+ assert(this == U->getUser() &&
+ "Only valid to query with a use of this instruction!");
+ return hasOperandBundles() && isBundleOperand(U - op_begin());
+ }
+ bool isBundleOperand(Value::const_user_iterator UI) const {
+ return isBundleOperand(&UI.getUse());
+ }
+
/// Return the total number operands (not operand bundles) used by
/// every operand bundle in this OperandBundleUser.
unsigned getNumTotalBundleOperands() const {
@@ -1504,8 +1761,7 @@ public:
/// Return true if \p Other has the same sequence of operand bundle
/// tags with the same number of operands on each one of them as this
/// OperandBundleUser.
- bool hasIdenticalOperandBundleSchema(
- const OperandBundleUser<InstrTy, OpIteratorTy> &Other) const {
+ bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
if (getNumOperandBundles() != Other.getNumOperandBundles())
return false;
@@ -1524,7 +1780,6 @@ public:
return false;
}
-protected:
/// Is the function attribute S disallowed by some operand bundle on
/// this operand bundle user?
bool isFnAttrDisallowedByOpBundle(StringRef S) const {
@@ -1583,8 +1838,8 @@ protected:
/// OperandBundleUse.
OperandBundleUse
operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
- auto op_begin = static_cast<const InstrTy *>(this)->op_begin();
- ArrayRef<Use> Inputs(op_begin + BOI.Begin, op_begin + BOI.End);
+ auto begin = op_begin();
+ ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
return OperandBundleUse(BOI.Tag, Inputs);
}
@@ -1593,37 +1848,79 @@ protected:
/// Return the start of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
+ ///
+ /// OperandBundleUser uses the descriptor area co-allocated with the host User
+ /// to store some meta information about which operands are "normal" operands,
+ /// and which ones belong to some operand bundle.
+ ///
+ /// The layout of an operand bundle user is
+ ///
+ /// +-----------uint32_t End-------------------------------------+
+ /// | |
+ /// | +--------uint32_t Begin--------------------+ |
+ /// | | | |
+ /// ^ ^ v v
+ /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
+ /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
+ /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
+ /// v v ^ ^
+ /// | | | |
+ /// | +--------uint32_t Begin------------+ |
+ /// | |
+ /// +-----------uint32_t End-----------------------------+
+ ///
+ ///
+ /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
+ /// list. These descriptions are installed and managed by this class, and
+ /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
+ ///
+ /// DU is an additional descriptor installed by User's 'operator new' to keep
+ /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
+ /// access or modify DU in any way, it's an implementation detail private to
+ /// User.
+ ///
+ /// The regular Use& vector for the User starts at U0. The operand bundle
+ /// uses are part of the Use& vector, just like normal uses. In the diagram
+ /// above, the operand bundle uses start at BOI0_U0. Each instance of
+ /// BundleOpInfo has information about a contiguous set of uses constituting
+ /// an operand bundle, and the total set of operand bundle uses themselves
+ /// form a contiguous set of uses (i.e. there are no gaps between uses
+ /// corresponding to individual operand bundles).
+ ///
+ /// This class does not know the location of the set of operand bundle uses
+ /// within the use list -- that is decided by the User using this class via
+ /// the BeginIdx argument in populateBundleOperandInfos.
+ ///
+ /// Currently operand bundle users with hung-off operands are not supported.
bundle_op_iterator bundle_op_info_begin() {
- if (!static_cast<InstrTy *>(this)->hasDescriptor())
+ if (!hasDescriptor())
return nullptr;
- uint8_t *BytesBegin = static_cast<InstrTy *>(this)->getDescriptor().begin();
+ uint8_t *BytesBegin = getDescriptor().begin();
return reinterpret_cast<bundle_op_iterator>(BytesBegin);
}
/// Return the start of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
const_bundle_op_iterator bundle_op_info_begin() const {
- auto *NonConstThis =
- const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+ auto *NonConstThis = const_cast<CallBase *>(this);
return NonConstThis->bundle_op_info_begin();
}
/// Return the end of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
bundle_op_iterator bundle_op_info_end() {
- if (!static_cast<InstrTy *>(this)->hasDescriptor())
+ if (!hasDescriptor())
return nullptr;
- uint8_t *BytesEnd = static_cast<InstrTy *>(this)->getDescriptor().end();
+ uint8_t *BytesEnd = getDescriptor().end();
return reinterpret_cast<bundle_op_iterator>(BytesEnd);
}
/// Return the end of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
const_bundle_op_iterator bundle_op_info_end() const {
- auto *NonConstThis =
- const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+ auto *NonConstThis = const_cast<CallBase *>(this);
return NonConstThis->bundle_op_info_end();
}
@@ -1643,30 +1940,8 @@ protected:
///
/// Each \p OperandBundleDef instance is tracked by a OperandBundleInfo
/// instance allocated in this User's descriptor.
- OpIteratorTy populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
- const unsigned BeginIndex) {
- auto It = static_cast<InstrTy *>(this)->op_begin() + BeginIndex;
- for (auto &B : Bundles)
- It = std::copy(B.input_begin(), B.input_end(), It);
-
- auto *ContextImpl = static_cast<InstrTy *>(this)->getContext().pImpl;
- auto BI = Bundles.begin();
- unsigned CurrentIndex = BeginIndex;
-
- for (auto &BOI : bundle_op_infos()) {
- assert(BI != Bundles.end() && "Incorrect allocation?");
-
- BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
- BOI.Begin = CurrentIndex;
- BOI.End = CurrentIndex + BI->input_size();
- CurrentIndex = BOI.End;
- BI++;
- }
-
- assert(BI == Bundles.end() && "Incorrect allocation?");
-
- return It;
- }
+ op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
+ const unsigned BeginIndex);
/// Return the BundleOpInfo for the operand at index OpIdx.
///
@@ -1680,6 +1955,7 @@ protected:
llvm_unreachable("Did not find operand bundle for operand!");
}
+protected:
/// Return the total number of values used in \p Bundles.
static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
unsigned Total = 0;
@@ -1687,8 +1963,102 @@ protected:
Total += B.input_size();
return Total;
}
+
+ /// @}
+ // End of operand bundle API.
+
+private:
+ bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
+ bool hasFnAttrOnCalledFunction(StringRef Kind) const;
+
+ template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
+ if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
+ return true;
+
+ // Operand bundles override attributes on the called function, but don't
+ // override attributes directly present on the call instruction.
+ if (isFnAttrDisallowedByOpBundle(Kind))
+ return false;
+
+ return hasFnAttrOnCalledFunction(Kind);
+ }
};
+template <>
+struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
+
+//===----------------------------------------------------------------------===//
+// FuncletPadInst Class
+//===----------------------------------------------------------------------===//
+class FuncletPadInst : public Instruction {
+private:
+ FuncletPadInst(const FuncletPadInst &CPI);
+
+ explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore);
+ explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ friend class CatchPadInst;
+ friend class CleanupPadInst;
+
+ FuncletPadInst *cloneImpl() const;
+
+public:
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// getNumArgOperands - Return the number of funcletpad arguments.
+ ///
+ unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+
+ /// Convenience accessors
+
+ /// Return the outer EH-pad this funclet is nested within.
+ ///
+ /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
+ /// is a CatchPadInst.
+ Value *getParentPad() const { return Op<-1>(); }
+ void setParentPad(Value *ParentPad) {
+ assert(ParentPad);
+ Op<-1>() = ParentPad;
+ }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
+ ///
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ const_op_range arg_operands() const {
+ return const_op_range(op_begin(), op_end() - 1);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Instruction *I) { return I->isFuncletPad(); }
+ static bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<FuncletPadInst>
+ : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
+
} // end namespace llvm
#endif // LLVM_IR_INSTRTYPES_H
diff --git a/contrib/llvm/include/llvm/IR/Instruction.def b/contrib/llvm/include/llvm/IR/Instruction.def
index 86617299c44a..58e4e2e1d6cc 100644
--- a/contrib/llvm/include/llvm/IR/Instruction.def
+++ b/contrib/llvm/include/llvm/IR/Instruction.def
@@ -32,6 +32,20 @@
#define LAST_TERM_INST(num)
#endif
+#ifndef FIRST_UNARY_INST
+#define FIRST_UNARY_INST(num)
+#endif
+#ifndef HANDLE_UNARY_INST
+#ifndef HANDLE_INST
+#define HANDLE_UNARY_INST(num, opcode, instclass)
+#else
+#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_UNARY_INST
+#define LAST_UNARY_INST(num)
+#endif
+
#ifndef FIRST_BINARY_INST
#define FIRST_BINARY_INST(num)
#endif
@@ -123,87 +137,96 @@ HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
LAST_TERM_INST (10)
+// Standard unary operators...
+ FIRST_UNARY_INST(11)
+HANDLE_UNARY_INST(11, FNeg , UnaryOperator)
+ LAST_UNARY_INST(11)
+
// Standard binary operators...
- FIRST_BINARY_INST(11)
-HANDLE_BINARY_INST(11, Add , BinaryOperator)
-HANDLE_BINARY_INST(12, FAdd , BinaryOperator)
-HANDLE_BINARY_INST(13, Sub , BinaryOperator)
-HANDLE_BINARY_INST(14, FSub , BinaryOperator)
-HANDLE_BINARY_INST(15, Mul , BinaryOperator)
-HANDLE_BINARY_INST(16, FMul , BinaryOperator)
-HANDLE_BINARY_INST(17, UDiv , BinaryOperator)
-HANDLE_BINARY_INST(18, SDiv , BinaryOperator)
-HANDLE_BINARY_INST(19, FDiv , BinaryOperator)
-HANDLE_BINARY_INST(20, URem , BinaryOperator)
-HANDLE_BINARY_INST(21, SRem , BinaryOperator)
-HANDLE_BINARY_INST(22, FRem , BinaryOperator)
+ FIRST_BINARY_INST(12)
+HANDLE_BINARY_INST(12, Add , BinaryOperator)
+HANDLE_BINARY_INST(13, FAdd , BinaryOperator)
+HANDLE_BINARY_INST(14, Sub , BinaryOperator)
+HANDLE_BINARY_INST(15, FSub , BinaryOperator)
+HANDLE_BINARY_INST(16, Mul , BinaryOperator)
+HANDLE_BINARY_INST(17, FMul , BinaryOperator)
+HANDLE_BINARY_INST(18, UDiv , BinaryOperator)
+HANDLE_BINARY_INST(19, SDiv , BinaryOperator)
+HANDLE_BINARY_INST(20, FDiv , BinaryOperator)
+HANDLE_BINARY_INST(21, URem , BinaryOperator)
+HANDLE_BINARY_INST(22, SRem , BinaryOperator)
+HANDLE_BINARY_INST(23, FRem , BinaryOperator)
// Logical operators (integer operands)
-HANDLE_BINARY_INST(23, Shl , BinaryOperator) // Shift left (logical)
-HANDLE_BINARY_INST(24, LShr , BinaryOperator) // Shift right (logical)
-HANDLE_BINARY_INST(25, AShr , BinaryOperator) // Shift right (arithmetic)
-HANDLE_BINARY_INST(26, And , BinaryOperator)
-HANDLE_BINARY_INST(27, Or , BinaryOperator)
-HANDLE_BINARY_INST(28, Xor , BinaryOperator)
- LAST_BINARY_INST(28)
+HANDLE_BINARY_INST(24, Shl , BinaryOperator) // Shift left (logical)
+HANDLE_BINARY_INST(25, LShr , BinaryOperator) // Shift right (logical)
+HANDLE_BINARY_INST(26, AShr , BinaryOperator) // Shift right (arithmetic)
+HANDLE_BINARY_INST(27, And , BinaryOperator)
+HANDLE_BINARY_INST(28, Or , BinaryOperator)
+HANDLE_BINARY_INST(29, Xor , BinaryOperator)
+ LAST_BINARY_INST(29)
// Memory operators...
- FIRST_MEMORY_INST(29)
-HANDLE_MEMORY_INST(29, Alloca, AllocaInst) // Stack management
-HANDLE_MEMORY_INST(30, Load , LoadInst ) // Memory manipulation instrs
-HANDLE_MEMORY_INST(31, Store , StoreInst )
-HANDLE_MEMORY_INST(32, GetElementPtr, GetElementPtrInst)
-HANDLE_MEMORY_INST(33, Fence , FenceInst )
-HANDLE_MEMORY_INST(34, AtomicCmpXchg , AtomicCmpXchgInst )
-HANDLE_MEMORY_INST(35, AtomicRMW , AtomicRMWInst )
- LAST_MEMORY_INST(35)
+ FIRST_MEMORY_INST(30)
+HANDLE_MEMORY_INST(30, Alloca, AllocaInst) // Stack management
+HANDLE_MEMORY_INST(31, Load , LoadInst ) // Memory manipulation instrs
+HANDLE_MEMORY_INST(32, Store , StoreInst )
+HANDLE_MEMORY_INST(33, GetElementPtr, GetElementPtrInst)
+HANDLE_MEMORY_INST(34, Fence , FenceInst )
+HANDLE_MEMORY_INST(35, AtomicCmpXchg , AtomicCmpXchgInst )
+HANDLE_MEMORY_INST(36, AtomicRMW , AtomicRMWInst )
+ LAST_MEMORY_INST(36)
// Cast operators ...
// NOTE: The order matters here because CastInst::isEliminableCastPair
// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
- FIRST_CAST_INST(36)
-HANDLE_CAST_INST(36, Trunc , TruncInst ) // Truncate integers
-HANDLE_CAST_INST(37, ZExt , ZExtInst ) // Zero extend integers
-HANDLE_CAST_INST(38, SExt , SExtInst ) // Sign extend integers
-HANDLE_CAST_INST(39, FPToUI , FPToUIInst ) // floating point -> UInt
-HANDLE_CAST_INST(40, FPToSI , FPToSIInst ) // floating point -> SInt
-HANDLE_CAST_INST(41, UIToFP , UIToFPInst ) // UInt -> floating point
-HANDLE_CAST_INST(42, SIToFP , SIToFPInst ) // SInt -> floating point
-HANDLE_CAST_INST(43, FPTrunc , FPTruncInst ) // Truncate floating point
-HANDLE_CAST_INST(44, FPExt , FPExtInst ) // Extend floating point
-HANDLE_CAST_INST(45, PtrToInt, PtrToIntInst) // Pointer -> Integer
-HANDLE_CAST_INST(46, IntToPtr, IntToPtrInst) // Integer -> Pointer
-HANDLE_CAST_INST(47, BitCast , BitCastInst ) // Type cast
-HANDLE_CAST_INST(48, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
- LAST_CAST_INST(48)
-
- FIRST_FUNCLETPAD_INST(49)
-HANDLE_FUNCLETPAD_INST(49, CleanupPad, CleanupPadInst)
-HANDLE_FUNCLETPAD_INST(50, CatchPad , CatchPadInst)
- LAST_FUNCLETPAD_INST(50)
+ FIRST_CAST_INST(37)
+HANDLE_CAST_INST(37, Trunc , TruncInst ) // Truncate integers
+HANDLE_CAST_INST(38, ZExt , ZExtInst ) // Zero extend integers
+HANDLE_CAST_INST(39, SExt , SExtInst ) // Sign extend integers
+HANDLE_CAST_INST(40, FPToUI , FPToUIInst ) // floating point -> UInt
+HANDLE_CAST_INST(41, FPToSI , FPToSIInst ) // floating point -> SInt
+HANDLE_CAST_INST(42, UIToFP , UIToFPInst ) // UInt -> floating point
+HANDLE_CAST_INST(43, SIToFP , SIToFPInst ) // SInt -> floating point
+HANDLE_CAST_INST(44, FPTrunc , FPTruncInst ) // Truncate floating point
+HANDLE_CAST_INST(45, FPExt , FPExtInst ) // Extend floating point
+HANDLE_CAST_INST(46, PtrToInt, PtrToIntInst) // Pointer -> Integer
+HANDLE_CAST_INST(47, IntToPtr, IntToPtrInst) // Integer -> Pointer
+HANDLE_CAST_INST(48, BitCast , BitCastInst ) // Type cast
+HANDLE_CAST_INST(49, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
+ LAST_CAST_INST(49)
+
+ FIRST_FUNCLETPAD_INST(50)
+HANDLE_FUNCLETPAD_INST(50, CleanupPad, CleanupPadInst)
+HANDLE_FUNCLETPAD_INST(51, CatchPad , CatchPadInst)
+ LAST_FUNCLETPAD_INST(51)
// Other operators...
- FIRST_OTHER_INST(51)
-HANDLE_OTHER_INST(51, ICmp , ICmpInst ) // Integer comparison instruction
-HANDLE_OTHER_INST(52, FCmp , FCmpInst ) // Floating point comparison instr.
-HANDLE_OTHER_INST(53, PHI , PHINode ) // PHI node instruction
-HANDLE_OTHER_INST(54, Call , CallInst ) // Call a function
-HANDLE_OTHER_INST(55, Select , SelectInst ) // select instruction
-HANDLE_USER_INST (56, UserOp1, Instruction) // May be used internally in a pass
-HANDLE_USER_INST (57, UserOp2, Instruction) // Internal to passes only
-HANDLE_OTHER_INST(58, VAArg , VAArgInst ) // vaarg instruction
-HANDLE_OTHER_INST(59, ExtractElement, ExtractElementInst)// extract from vector
-HANDLE_OTHER_INST(60, InsertElement, InsertElementInst) // insert into vector
-HANDLE_OTHER_INST(61, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
-HANDLE_OTHER_INST(62, ExtractValue, ExtractValueInst)// extract from aggregate
-HANDLE_OTHER_INST(63, InsertValue, InsertValueInst) // insert into aggregate
-HANDLE_OTHER_INST(64, LandingPad, LandingPadInst) // Landing pad instruction.
- LAST_OTHER_INST(64)
+ FIRST_OTHER_INST(52)
+HANDLE_OTHER_INST(52, ICmp , ICmpInst ) // Integer comparison instruction
+HANDLE_OTHER_INST(53, FCmp , FCmpInst ) // Floating point comparison instr.
+HANDLE_OTHER_INST(54, PHI , PHINode ) // PHI node instruction
+HANDLE_OTHER_INST(55, Call , CallInst ) // Call a function
+HANDLE_OTHER_INST(56, Select , SelectInst ) // select instruction
+HANDLE_USER_INST (57, UserOp1, Instruction) // May be used internally in a pass
+HANDLE_USER_INST (58, UserOp2, Instruction) // Internal to passes only
+HANDLE_OTHER_INST(59, VAArg , VAArgInst ) // vaarg instruction
+HANDLE_OTHER_INST(60, ExtractElement, ExtractElementInst)// extract from vector
+HANDLE_OTHER_INST(61, InsertElement, InsertElementInst) // insert into vector
+HANDLE_OTHER_INST(62, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
+HANDLE_OTHER_INST(63, ExtractValue, ExtractValueInst)// extract from aggregate
+HANDLE_OTHER_INST(64, InsertValue, InsertValueInst) // insert into aggregate
+HANDLE_OTHER_INST(65, LandingPad, LandingPadInst) // Landing pad instruction.
+ LAST_OTHER_INST(65)
#undef FIRST_TERM_INST
#undef HANDLE_TERM_INST
#undef LAST_TERM_INST
+#undef FIRST_UNARY_INST
+#undef HANDLE_UNARY_INST
+#undef LAST_UNARY_INST
+
#undef FIRST_BINARY_INST
#undef HANDLE_BINARY_INST
#undef LAST_BINARY_INST
diff --git a/contrib/llvm/include/llvm/IR/Instruction.h b/contrib/llvm/include/llvm/IR/Instruction.h
index 643c2a0761d1..5e78cb1edf02 100644
--- a/contrib/llvm/include/llvm/IR/Instruction.h
+++ b/contrib/llvm/include/llvm/IR/Instruction.h
@@ -127,11 +127,15 @@ public:
const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
bool isTerminator() const { return isTerminator(getOpcode()); }
+ bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
bool isShift() { return isShift(getOpcode()); }
bool isCast() const { return isCast(getOpcode()); }
bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
+ bool isExceptionalTerminator() const {
+ return isExceptionalTerminator(getOpcode());
+ }
static const char* getOpcodeName(unsigned OpCode);
@@ -139,6 +143,9 @@ public:
return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
}
+ static inline bool isUnaryOp(unsigned Opcode) {
+ return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
+ }
static inline bool isBinaryOp(unsigned Opcode) {
return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
}
@@ -182,6 +189,20 @@ public:
return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
}
+ /// Returns true if the OpCode is a terminator related to exception handling.
+ static inline bool isExceptionalTerminator(unsigned OpCode) {
+ switch (OpCode) {
+ case Instruction::CatchSwitch:
+ case Instruction::CatchRet:
+ case Instruction::CleanupRet:
+ case Instruction::Invoke:
+ case Instruction::Resume:
+ return true;
+ default:
+ return false;
+ }
+ }
+
//===--------------------------------------------------------------------===//
// Metadata manipulation.
//===--------------------------------------------------------------------===//
@@ -561,6 +582,10 @@ public:
}
}
+ /// Return true if the instruction is a llvm.lifetime.start or
+ /// llvm.lifetime.end marker.
+ bool isLifetimeStartOrEnd() const;
+
/// Return a pointer to the next non-debug instruction in the same basic
/// block as 'this', or nullptr if no such instruction exists.
const Instruction *getNextNonDebugInstruction() const;
@@ -569,6 +594,14 @@ public:
static_cast<const Instruction *>(this)->getNextNonDebugInstruction());
}
+ /// Return a pointer to the previous non-debug instruction in the same basic
+ /// block as 'this', or nullptr if no such instruction exists.
+ const Instruction *getPrevNonDebugInstruction() const;
+ Instruction *getPrevNonDebugInstruction() {
+ return const_cast<Instruction *>(
+ static_cast<const Instruction *>(this)->getPrevNonDebugInstruction());
+ }
+
/// Create a copy of 'this' instruction that is identical in all ways except
/// the following:
/// * The instruction has no parent
@@ -611,6 +644,16 @@ public:
/// operands in the corresponding predecessor block.
bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
+ /// Return the number of successors that this instruction has. The instruction
+ /// must be a terminator.
+ unsigned getNumSuccessors() const;
+
+ /// Return the specified successor. This instruction must be a terminator.
+ BasicBlock *getSuccessor(unsigned Idx) const;
+
+ /// Update the specified successor to point at the provided block. This
+ /// instruction must be a terminator.
+ void setSuccessor(unsigned Idx, BasicBlock *BB);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
@@ -627,6 +670,13 @@ public:
#include "llvm/IR/Instruction.def"
};
+ enum UnaryOps {
+#define FIRST_UNARY_INST(N) UnaryOpsBegin = N,
+#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
+#define LAST_UNARY_INST(N) UnaryOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
enum BinaryOps {
#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
diff --git a/contrib/llvm/include/llvm/IR/Instructions.h b/contrib/llvm/include/llvm/IR/Instructions.h
index 9be8bd1a07bc..0ff8f56f213a 100644
--- a/contrib/llvm/include/llvm/IR/Instructions.h
+++ b/contrib/llvm/include/llvm/IR/Instructions.h
@@ -175,47 +175,58 @@ protected:
LoadInst *cloneImpl() const;
public:
- LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
- LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
- LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
- Instruction *InsertBefore = nullptr)
- : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, InsertBefore) {}
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+ Instruction *InsertBefore = nullptr);
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
- Instruction *InsertBefore = nullptr)
- : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, Align, InsertBefore) {}
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, Instruction *InsertBefore = nullptr);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
- AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
- Instruction *InsertBefore = nullptr)
- : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
- LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
- LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
- LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
- bool isVolatile = false, Instruction *InsertBefore = nullptr);
- explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
- bool isVolatile = false,
+
+ // Deprecated [opaque pointer types]
+ explicit LoadInst(Value *Ptr, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr)
- : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, InsertBefore) {}
- LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
- BasicBlock *InsertAtEnd);
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ InsertBefore) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ InsertAtEnd) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, InsertBefore) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ BasicBlock *InsertAtEnd)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, InsertAtEnd) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, Align, InsertBefore) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ BasicBlock *InsertAtEnd)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, Align, InsertAtEnd) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, Align, Order, SSID, InsertBefore) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd)
+ : LoadInst(Ptr->getType()->getPointerElementType(), Ptr, NameStr,
+ isVolatile, Align, Order, SSID, InsertAtEnd) {}
/// Return true if this is a load from a volatile memory location.
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
@@ -735,6 +746,8 @@ public:
return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
}
+ static StringRef getOperationName(BinOp Op);
+
void setOperation(BinOp Operation) {
unsigned short SubclassData = getSubclassDataFromInstruction();
setInstructionSubclassData((SubclassData & 31) |
@@ -1102,6 +1115,71 @@ GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
//===----------------------------------------------------------------------===//
+// UnaryOperator Class
+//===----------------------------------------------------------------------===//
+
+/// a unary instruction
+class UnaryOperator : public UnaryInstruction {
+ void AssertOK();
+
+protected:
+ UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
+ const Twine &Name, Instruction *InsertBefore);
+ UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+
+ UnaryOperator *cloneImpl() const;
+
+public:
+
+ /// Construct a unary instruction, given the opcode and an operand.
+ /// Optionally (if InstBefore is specified) insert the instruction
+ /// into a BasicBlock right before the specified instruction. The specified
+ /// Instruction is allowed to be a dereferenced end iterator.
+ ///
+ static UnaryOperator *Create(UnaryOps Op, Value *S,
+ const Twine &Name = Twine(),
+ Instruction *InsertBefore = nullptr);
+
+ /// Construct a unary instruction, given the opcode and an operand.
+ /// Also automatically insert this instruction to the end of the
+ /// BasicBlock specified.
+ ///
+ static UnaryOperator *Create(UnaryOps Op, Value *S,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd);
+
+ /// These methods just forward to Create, and are useful when you
+ /// statically know what type of instruction you're going to create. These
+ /// helpers just save some typing.
+#define HANDLE_UNARY_INST(N, OPC, CLASS) \
+ static UnaryInstruction *Create##OPC(Value *V, \
+ const Twine &Name = "") {\
+ return Create(Instruction::OPC, V, Name);\
+ }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_UNARY_INST(N, OPC, CLASS) \
+ static UnaryInstruction *Create##OPC(Value *V, \
+ const Twine &Name, BasicBlock *BB) {\
+ return Create(Instruction::OPC, V, Name, BB);\
+ }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_UNARY_INST(N, OPC, CLASS) \
+ static UnaryInstruction *Create##OPC(Value *V, \
+ const Twine &Name, Instruction *I) {\
+ return Create(Instruction::OPC, V, Name, I);\
+ }
+#include "llvm/IR/Instruction.def"
+
+ UnaryOps getOpcode() const {
+ return static_cast<UnaryOps>(Instruction::getOpcode());
+ }
+};
+
+//===----------------------------------------------------------------------===//
// ICmpInst Class
//===----------------------------------------------------------------------===//
@@ -1297,12 +1375,13 @@ public:
/// Constructor with no-insertion semantics
FCmpInst(
- Predicate pred, ///< The predicate to use for the comparison
+ Predicate Pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
- const Twine &NameStr = "" ///< Name of the instruction
- ) : CmpInst(makeCmpResultType(LHS->getType()),
- Instruction::FCmp, pred, LHS, RHS, NameStr) {
+ const Twine &NameStr = "", ///< Name of the instruction
+ Instruction *FlagsSource = nullptr
+ ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
+ RHS, NameStr, nullptr, FlagsSource) {
AssertOK();
}
@@ -1350,537 +1429,13 @@ public:
}
};
-class CallInst;
-class InvokeInst;
-
-template <class T> struct CallBaseParent { using type = Instruction; };
-
-template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
-
-//===----------------------------------------------------------------------===//
-/// Base class for all callable instructions (InvokeInst and CallInst)
-/// Holds everything related to calling a function, abstracting from the base
-/// type @p BaseInstTy and the concrete instruction @p InstTy
-///
-template <class InstTy>
-class CallBase : public CallBaseParent<InstTy>::type,
- public OperandBundleUser<InstTy, User::op_iterator> {
-protected:
- AttributeList Attrs; ///< parameter attributes for callable
- FunctionType *FTy;
- using BaseInstTy = typename CallBaseParent<InstTy>::type;
-
- template <class... ArgsTy>
- CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
- : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
- bool hasDescriptor() const { return Value::HasDescriptor; }
-
- using BaseInstTy::BaseInstTy;
-
- using OperandBundleUser<InstTy,
- User::op_iterator>::isFnAttrDisallowedByOpBundle;
- using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
- using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
- using Instruction::getSubclassDataFromInstruction;
- using Instruction::setInstructionSubclassData;
-
-public:
- using Instruction::getContext;
- using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
- using OperandBundleUser<InstTy,
- User::op_iterator>::getBundleOperandsStartIndex;
-
- static bool classof(const Instruction *I) {
- llvm_unreachable(
- "CallBase is not meant to be used as part of the classof hierarchy");
- }
-
-public:
- /// Return the parameter attributes for this call.
- ///
- AttributeList getAttributes() const { return Attrs; }
-
- /// Set the parameter attributes for this call.
- ///
- void setAttributes(AttributeList A) { Attrs = A; }
-
- FunctionType *getFunctionType() const { return FTy; }
-
- void mutateFunctionType(FunctionType *FTy) {
- Value::mutateType(FTy->getReturnType());
- this->FTy = FTy;
- }
-
- /// Return the number of call arguments.
- ///
- unsigned getNumArgOperands() const {
- return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
- }
-
- /// getArgOperand/setArgOperand - Return/set the i-th call argument.
- ///
- Value *getArgOperand(unsigned i) const {
- assert(i < getNumArgOperands() && "Out of bounds!");
- return getOperand(i);
- }
- void setArgOperand(unsigned i, Value *v) {
- assert(i < getNumArgOperands() && "Out of bounds!");
- setOperand(i, v);
- }
-
- /// Return the iterator pointing to the beginning of the argument list.
- User::op_iterator arg_begin() { return op_begin(); }
-
- /// Return the iterator pointing to the end of the argument list.
- User::op_iterator arg_end() {
- // [ call args ], [ operand bundles ], callee
- return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
- }
-
- /// Iteration adapter for range-for loops.
- iterator_range<User::op_iterator> arg_operands() {
- return make_range(arg_begin(), arg_end());
- }
-
- /// Return the iterator pointing to the beginning of the argument list.
- User::const_op_iterator arg_begin() const { return op_begin(); }
-
- /// Return the iterator pointing to the end of the argument list.
- User::const_op_iterator arg_end() const {
- // [ call args ], [ operand bundles ], callee
- return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
- }
-
- /// Iteration adapter for range-for loops.
- iterator_range<User::const_op_iterator> arg_operands() const {
- return make_range(arg_begin(), arg_end());
- }
-
- /// Wrappers for getting the \c Use of a call argument.
- const Use &getArgOperandUse(unsigned i) const {
- assert(i < getNumArgOperands() && "Out of bounds!");
- return User::getOperandUse(i);
- }
- Use &getArgOperandUse(unsigned i) {
- assert(i < getNumArgOperands() && "Out of bounds!");
- return User::getOperandUse(i);
- }
-
- /// If one of the arguments has the 'returned' attribute, return its
- /// operand value. Otherwise, return nullptr.
- Value *getReturnedArgOperand() const {
- unsigned Index;
-
- if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
- return getArgOperand(Index - AttributeList::FirstArgIndex);
- if (const Function *F = getCalledFunction())
- if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
- Index)
- return getArgOperand(Index - AttributeList::FirstArgIndex);
-
- return nullptr;
- }
-
- User::op_iterator op_begin() {
- return OperandTraits<CallBase>::op_begin(this);
- }
-
- User::const_op_iterator op_begin() const {
- return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
- }
-
- User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
-
- User::const_op_iterator op_end() const {
- return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
- }
-
- Value *getOperand(unsigned i_nocapture) const {
- assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
- "getOperand() out of range!");
- return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
- const_cast<CallBase *>(this))[i_nocapture]
- .get());
- }
-
- void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
- assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
- "setOperand() out of range!");
- OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
- }
-
- unsigned getNumOperands() const {
- return OperandTraits<CallBase>::operands(this);
- }
- template <int Idx_nocapture> Use &Op() {
- return User::OpFrom<Idx_nocapture>(this);
- }
- template <int Idx_nocapture> const Use &Op() const {
- return User::OpFrom<Idx_nocapture>(this);
- }
-
- /// Return the function called, or null if this is an
- /// indirect function invocation.
- ///
- Function *getCalledFunction() const {
- return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
- }
-
- /// Determine whether this call has the given attribute.
- bool hasFnAttr(Attribute::AttrKind Kind) const {
- assert(Kind != Attribute::NoBuiltin &&
- "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
- return hasFnAttrImpl(Kind);
- }
-
- /// Determine whether this call has the given attribute.
- bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
-
- /// getCallingConv/setCallingConv - Get or set the calling convention of this
- /// function call.
- CallingConv::ID getCallingConv() const {
- return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
- }
- void setCallingConv(CallingConv::ID CC) {
- auto ID = static_cast<unsigned>(CC);
- assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
- (ID << 2));
- }
-
-
- /// adds the attribute to the list of attributes.
- void addAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeList PAL = getAttributes();
- PAL = PAL.addAttribute(getContext(), i, Kind);
- setAttributes(PAL);
- }
-
- /// adds the attribute to the list of attributes.
- void addAttribute(unsigned i, Attribute Attr) {
- AttributeList PAL = getAttributes();
- PAL = PAL.addAttribute(getContext(), i, Attr);
- setAttributes(PAL);
- }
-
- /// Adds the attribute to the indicated argument
- void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- AttributeList PAL = getAttributes();
- PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
- setAttributes(PAL);
- }
-
- /// Adds the attribute to the indicated argument
- void addParamAttr(unsigned ArgNo, Attribute Attr) {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- AttributeList PAL = getAttributes();
- PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
- setAttributes(PAL);
- }
-
- /// removes the attribute from the list of attributes.
- void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeList PAL = getAttributes();
- PAL = PAL.removeAttribute(getContext(), i, Kind);
- setAttributes(PAL);
- }
-
- /// removes the attribute from the list of attributes.
- void removeAttribute(unsigned i, StringRef Kind) {
- AttributeList PAL = getAttributes();
- PAL = PAL.removeAttribute(getContext(), i, Kind);
- setAttributes(PAL);
- }
-
- /// Removes the attribute from the given argument
- void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- AttributeList PAL = getAttributes();
- PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
- setAttributes(PAL);
- }
-
- /// Removes the attribute from the given argument
- void removeParamAttr(unsigned ArgNo, StringRef Kind) {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- AttributeList PAL = getAttributes();
- PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
- setAttributes(PAL);
- }
-
- /// adds the dereferenceable attribute to the list of attributes.
- void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
- AttributeList PAL = getAttributes();
- PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
- setAttributes(PAL);
- }
-
- /// adds the dereferenceable_or_null attribute to the list of
- /// attributes.
- void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
- AttributeList PAL = getAttributes();
- PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
- setAttributes(PAL);
- }
-
- /// Determine whether the return value has the given attribute.
- bool hasRetAttr(Attribute::AttrKind Kind) const {
- if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
- return true;
-
- // Look at the callee, if available.
- if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
- return false;
- }
-
- /// Determine whether the argument or parameter has the given attribute.
- bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
- assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
-
- if (Attrs.hasParamAttribute(ArgNo, Kind))
- return true;
- if (const Function *F = getCalledFunction())
- return F->getAttributes().hasParamAttribute(ArgNo, Kind);
- return false;
- }
-
- /// Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
- return getAttributes().getAttribute(i, Kind);
- }
-
- /// Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, StringRef Kind) const {
- return getAttributes().getAttribute(i, Kind);
- }
-
- /// Get the attribute of a given kind from a given arg
- Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- return getAttributes().getParamAttr(ArgNo, Kind);
- }
-
- /// Get the attribute of a given kind from a given arg
- Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
- assert(ArgNo < getNumArgOperands() && "Out of bounds");
- return getAttributes().getParamAttr(ArgNo, Kind);
- }
- /// Return true if the data operand at index \p i has the attribute \p
- /// A.
- ///
- /// Data operands include call arguments and values used in operand bundles,
- /// but does not include the callee operand. This routine dispatches to the
- /// underlying AttributeList or the OperandBundleUser as appropriate.
- ///
- /// The index \p i is interpreted as
- ///
- /// \p i == Attribute::ReturnIndex -> the return value
- /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
- /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
- /// (\p i - 1) in the operand list.
- bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
- // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
- // The last operand is the callee.
- assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&
- "Data operand index out of bounds!");
-
- // The attribute A can either be directly specified, if the operand in
- // question is a call argument; or be indirectly implied by the kind of its
- // containing operand bundle, if the operand is a bundle operand.
-
- if (i == AttributeList::ReturnIndex)
- return hasRetAttr(Kind);
-
- // FIXME: Avoid these i - 1 calculations and update the API to use
- // zero-based indices.
- if (i < (getNumArgOperands() + 1))
- return paramHasAttr(i - 1, Kind);
-
- assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
- "Must be either a call argument or an operand bundle!");
- return bundleOperandHasAttr(i - 1, Kind);
- }
-
- /// Extract the alignment of the return value.
- unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
-
- /// Extract the alignment for a call or parameter (0=unknown).
- unsigned getParamAlignment(unsigned ArgNo) const {
- return Attrs.getParamAlignment(ArgNo);
- }
-
- /// Extract the number of dereferenceable bytes for a call or
- /// parameter (0=unknown).
- uint64_t getDereferenceableBytes(unsigned i) const {
- return Attrs.getDereferenceableBytes(i);
- }
-
- /// Extract the number of dereferenceable_or_null bytes for a call or
- /// parameter (0=unknown).
- uint64_t getDereferenceableOrNullBytes(unsigned i) const {
- return Attrs.getDereferenceableOrNullBytes(i);
- }
-
- /// Determine if the return value is marked with NoAlias attribute.
- bool returnDoesNotAlias() const {
- return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
- }
-
- /// Return true if the call should not be treated as a call to a
- /// builtin.
- bool isNoBuiltin() const {
- return hasFnAttrImpl(Attribute::NoBuiltin) &&
- !hasFnAttrImpl(Attribute::Builtin);
- }
-
- /// Determine if the call requires strict floating point semantics.
- bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
-
- /// Return true if the call should not be inlined.
- bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
- void setIsNoInline() {
- addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
- }
- /// Determine if the call does not access memory.
- bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
- }
- void setDoesNotAccessMemory() {
- addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
- }
-
- /// Determine if the call does not access or only reads memory.
- bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
- }
- void setOnlyReadsMemory() {
- addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
- }
-
- /// Determine if the call does not access or only writes memory.
- bool doesNotReadMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
- }
- void setDoesNotReadMemory() {
- addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
- }
-
- /// Determine if the call can access memmory only using pointers based
- /// on its arguments.
- bool onlyAccessesArgMemory() const {
- return hasFnAttr(Attribute::ArgMemOnly);
- }
- void setOnlyAccessesArgMemory() {
- addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
- }
-
- /// Determine if the function may only access memory that is
- /// inaccessible from the IR.
- bool onlyAccessesInaccessibleMemory() const {
- return hasFnAttr(Attribute::InaccessibleMemOnly);
- }
- void setOnlyAccessesInaccessibleMemory() {
- addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
- }
-
- /// Determine if the function may only access memory that is
- /// either inaccessible from the IR or pointed to by its arguments.
- bool onlyAccessesInaccessibleMemOrArgMem() const {
- return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
- }
- void setOnlyAccessesInaccessibleMemOrArgMem() {
- addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
- }
- /// Determine if the call cannot return.
- bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
- void setDoesNotReturn() {
- addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
- }
-
- /// Determine if the call should not perform indirect branch tracking.
- bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
-
- /// Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow() {
- addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
- }
-
- /// Determine if the invoke cannot be duplicated.
- bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
- void setCannotDuplicate() {
- addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
- }
-
- /// Determine if the invoke is convergent
- bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
- void setConvergent() {
- addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
- }
- void setNotConvergent() {
- removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
- }
-
- /// Determine if the call returns a structure through first
- /// pointer argument.
- bool hasStructRetAttr() const {
- if (getNumArgOperands() == 0)
- return false;
-
- // Be friendly and also check the callee.
- return paramHasAttr(0, Attribute::StructRet);
- }
-
- /// Determine if any call argument is an aggregate passed by value.
- bool hasByValArgument() const {
- return Attrs.hasAttrSomewhere(Attribute::ByVal);
- }
- /// Get a pointer to the function that is invoked by this
- /// instruction.
- const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
- Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
-
- /// Set the function called.
- void setCalledFunction(Value* Fn) {
- setCalledFunction(
- cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
- Fn);
- }
- void setCalledFunction(FunctionType *FTy, Value *Fn) {
- this->FTy = FTy;
- assert(FTy == cast<FunctionType>(
- cast<PointerType>(Fn->getType())->getElementType()));
- Op<-InstTy::ArgOffset>() = Fn;
- }
-
-protected:
- template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
- if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
- return true;
-
- // Operand bundles override attributes on the called function, but don't
- // override attributes directly present on the call instruction.
- if (isFnAttrDisallowedByOpBundle(Kind))
- return false;
-
- if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
- Kind);
- return false;
- }
-};
-
//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call. The rest of the bits
/// hold the calling convention of the call.
///
-class CallInst : public CallBase<CallInst> {
- friend class OperandBundleUser<CallInst, User::op_iterator>;
-
+class CallInst : public CallBase {
CallInst(const CallInst &CI);
/// Construct a CallInst given a range of arguments.
@@ -1889,36 +1444,32 @@ class CallInst : public CallBase<CallInst> {
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore);
- inline CallInst(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
- Instruction *InsertBefore)
- : CallInst(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr, InsertBefore) {}
-
- inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
- Instruction *InsertBefore)
- : CallInst(Func, Args, None, NameStr, InsertBefore) {}
+ inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
/// Construct a CallInst given a range of arguments.
/// Construct a CallInst from a range of arguments
- inline CallInst(Value *Func, ArrayRef<Value *> Args,
+ inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
BasicBlock *InsertAtEnd);
- explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
+ explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
+ Instruction *InsertBefore);
- CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
+ CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
- void init(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
- init(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr);
- }
void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
- void init(Value *Func, const Twine &NameStr);
+ void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
+
+ /// Compute the number of operands to allocate.
+ static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
+ // We need one operand for the called function, plus the input operand
+ // counts provided.
+ return 1 + NumArgs + NumBundleInputs;
+ }
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
@@ -1927,29 +1478,15 @@ protected:
CallInst *cloneImpl() const;
public:
- static constexpr int ArgOffset = 1;
-
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles = None,
- const Twine &NameStr = "",
+ static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr, InsertBefore);
- }
-
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
- const Twine &NameStr,
- Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, None, NameStr, InsertBefore);
+ return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
}
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
- return new (unsigned(Args.size() + 1))
+ return new (ComputeNumOperands(Args.size()))
CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
}
@@ -1957,39 +1494,107 @@ public:
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
- const unsigned TotalOps =
- unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+ const int NumOperands =
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
- return new (TotalOps, DescriptorBytes)
+ return new (NumOperands, DescriptorBytes)
CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
}
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
+ }
+
+ static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return new (ComputeNumOperands(Args.size()))
+ CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
+ }
+
+ static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- const unsigned TotalOps =
- unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+ const int NumOperands =
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
- return new (TotalOps, DescriptorBytes)
- CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
+ return new (NumOperands, DescriptorBytes)
+ CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
}
- static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ static CallInst *Create(Function *Func, const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(Func->getFunctionType(), Func, NameStr, InsertBefore);
+ }
+
+ static CallInst *Create(Function *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(Func->getFunctionType(), Func, Args, NameStr, InsertBefore);
+ }
+
+ static CallInst *Create(Function *Func, const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return Create(Func->getFunctionType(), Func, NameStr, InsertAtEnd);
+ }
+
+ static CallInst *Create(Function *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return new (unsigned(Args.size() + 1))
- CallInst(Func, Args, None, NameStr, InsertAtEnd);
+ return Create(Func->getFunctionType(), Func, Args, NameStr, InsertAtEnd);
}
- static CallInst *Create(Value *F, const Twine &NameStr = "",
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, NameStr, InsertBefore);
+ }
+
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
- return new (1) CallInst(F, NameStr, InsertBefore);
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, NameStr, InsertBefore);
}
- static CallInst *Create(Value *F, const Twine &NameStr,
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr, InsertBefore);
+ }
+
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, const Twine &NameStr,
BasicBlock *InsertAtEnd) {
- return new (1) CallInst(F, NameStr, InsertAtEnd);
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, NameStr, InsertAtEnd);
+ }
+
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, NameStr, InsertAtEnd);
+ }
+
+ // Deprecated [opaque pointer types]
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr, InsertAtEnd);
}
/// Create a clone of \p CI with a different set of operand bundles and
@@ -2080,7 +1685,7 @@ public:
}
/// Check if this call is an inline asm statement.
- bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
+ bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
@@ -2098,32 +1703,25 @@ private:
}
};
-template <>
-struct OperandTraits<CallBase<CallInst>>
- : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
-
-CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
+CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
BasicBlock *InsertAtEnd)
- : CallBase<CallInst>(
- cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType())
- ->getReturnType(),
- Instruction::Call,
- OperandTraits<CallBase<CallInst>>::op_end(this) -
- (Args.size() + CountBundleInputs(Bundles) + 1),
- unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
- init(Func, Args, Bundles, NameStr);
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) -
+ (Args.size() + CountBundleInputs(Bundles) + 1),
+ unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
+ InsertAtEnd) {
+ init(Ty, Func, Args, Bundles, NameStr);
}
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore)
- : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
- OperandTraits<CallBase<CallInst>>::op_end(this) -
- (Args.size() + CountBundleInputs(Bundles) + 1),
- unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
- InsertBefore) {
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) -
+ (Args.size() + CountBundleInputs(Bundles) + 1),
+ unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
+ InsertBefore) {
init(Ty, Func, Args, Bundles, NameStr);
}
@@ -2456,14 +2054,24 @@ public:
}
/// Return true if this shuffle returns a vector with a different number of
- /// elements than its source elements.
- /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2>
+ /// elements than its source vectors.
+ /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
+ /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
bool changesLength() const {
unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
return NumSourceElts != NumMaskElts;
}
+ /// Return true if this shuffle returns a vector with a greater number of
+ /// elements than its source vectors.
+ /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
+ bool increasesLength() const {
+ unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
+ unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
+ return NumSourceElts < NumMaskElts;
+ }
+
/// Return true if this shuffle mask chooses elements from exactly one source
/// vector.
/// Example: <7,5,undef,7>
@@ -2497,15 +2105,27 @@ public:
return isIdentityMask(MaskAsInts);
}
- /// Return true if this shuffle mask chooses elements from exactly one source
+ /// Return true if this shuffle chooses elements from exactly one source
/// vector without lane crossings and does not change the number of elements
/// from its input vectors.
/// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
- /// TODO: Optionally allow length-changing shuffles.
bool isIdentity() const {
return !changesLength() && isIdentityMask(getShuffleMask());
}
+ /// Return true if this shuffle lengthens exactly one source vector with
+ /// undefs in the high elements.
+ bool isIdentityWithPadding() const;
+
+ /// Return true if this shuffle extracts the first N elements of exactly one
+ /// source vector.
+ bool isIdentityWithExtract() const;
+
+ /// Return true if this shuffle concatenates its 2 source vectors. This
+ /// returns false if either input is undefined. In that case, the shuffle
+ /// is better classified as an identity with padding operation.
+ bool isConcat() const;
+
/// Return true if this shuffle mask chooses elements from its source vectors
/// without lane crossings. A shuffle using this mask would be
/// equivalent to a vector select with a constant condition operand.
@@ -2625,6 +2245,25 @@ public:
return !changesLength() && isTransposeMask(getMask());
}
+ /// Return true if this shuffle mask is an extract subvector mask.
+ /// A valid extract subvector mask returns a smaller vector from a single
+ /// source operand. The base extraction index is returned as well.
+ static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
+ int &Index);
+ static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
+ int &Index) {
+ assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
+ SmallVector<int, 16> MaskAsInts;
+ getShuffleMask(Mask, MaskAsInts);
+ return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
+ }
+
+ /// Return true if this shuffle mask is an extract subvector mask.
+ bool isExtractSubvectorMask(int &Index) const {
+ int NumSrcElts = Op<0>()->getType()->getVectorNumElements();
+ return isExtractSubvectorMask(getMask(), NumSrcElts, Index);
+ }
+
/// Change values in a shuffle permute mask assuming the two vector operands
/// of length InVecNumElts have swapped position.
static void commuteShuffleMask(MutableArrayRef<int> Mask,
@@ -3241,7 +2880,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
/// Return a value (possibly void), from a function. Execution
/// does not continue in this function any longer.
///
-class ReturnInst : public TerminatorInst {
+class ReturnInst : public Instruction {
ReturnInst(const ReturnInst &RI);
private:
@@ -3301,8 +2940,6 @@ public:
}
private:
- friend TerminatorInst;
-
BasicBlock *getSuccessor(unsigned idx) const {
llvm_unreachable("ReturnInst has no successors!");
}
@@ -3325,7 +2962,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
-class BranchInst : public TerminatorInst {
+class BranchInst : public Instruction {
/// Ops list - Branches are strange. The operands are ordered:
/// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
/// they don't have to check for cond/uncond branchness. These are mostly
@@ -3354,6 +2991,33 @@ protected:
BranchInst *cloneImpl() const;
public:
+ /// Iterator type that casts an operand to a basic block.
+ ///
+ /// This only makes sense because the successors are stored as adjacent
+ /// operands for branch instructions.
+ struct succ_op_iterator
+ : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
+ std::random_access_iterator_tag, BasicBlock *,
+ ptrdiff_t, BasicBlock *, BasicBlock *> {
+ explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
+
+ BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
+ BasicBlock *operator->() const { return operator*(); }
+ };
+
+ /// The const version of `succ_op_iterator`.
+ struct const_succ_op_iterator
+ : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
+ std::random_access_iterator_tag,
+ const BasicBlock *, ptrdiff_t, const BasicBlock *,
+ const BasicBlock *> {
+ explicit const_succ_op_iterator(const_value_op_iterator I)
+ : iterator_adaptor_base(I) {}
+
+ const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
+ const BasicBlock *operator->() const { return operator*(); }
+ };
+
static BranchInst *Create(BasicBlock *IfTrue,
Instruction *InsertBefore = nullptr) {
return new(1) BranchInst(IfTrue, InsertBefore);
@@ -3408,6 +3072,18 @@ public:
/// continues to map correctly to each operand.
void swapSuccessors();
+ iterator_range<succ_op_iterator> successors() {
+ return make_range(
+ succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
+ succ_op_iterator(value_op_end()));
+ }
+
+ iterator_range<const_succ_op_iterator> successors() const {
+ return make_range(const_succ_op_iterator(
+ std::next(value_op_begin(), isConditional() ? 1 : 0)),
+ const_succ_op_iterator(value_op_end()));
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Br);
@@ -3430,7 +3106,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
//===---------------------------------------------------------------------------
/// Multiway switch
///
-class SwitchInst : public TerminatorInst {
+class SwitchInst : public Instruction {
unsigned ReservedSpace;
// Operand[0] = Value to switch on
@@ -3513,7 +3189,7 @@ public:
/// Returns number of current case.
unsigned getCaseIndex() const { return Index; }
- /// Returns TerminatorInst's successor index for current case successor.
+ /// Returns successor index for current case successor.
unsigned getSuccessorIndex() const {
assert(((unsigned)Index == DefaultPseudoIndex ||
(unsigned)Index < SI->getNumCases()) &&
@@ -3569,7 +3245,7 @@ public:
CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
/// Initializes case iterator for given SwitchInst and for given
- /// TerminatorInst's successor index.
+ /// successor index.
static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
unsigned SuccessorIndex) {
assert(SuccessorIndex < SI->getNumSuccessors() &&
@@ -3787,7 +3463,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
-class IndirectBrInst : public TerminatorInst {
+class IndirectBrInst : public Instruction {
unsigned ReservedSpace;
// Operand[0] = Address to jump to
@@ -3821,6 +3497,33 @@ protected:
IndirectBrInst *cloneImpl() const;
public:
+ /// Iterator type that casts an operand to a basic block.
+ ///
+ /// This only makes sense because the successors are stored as adjacent
+ /// operands for indirectbr instructions.
+ struct succ_op_iterator
+ : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
+ std::random_access_iterator_tag, BasicBlock *,
+ ptrdiff_t, BasicBlock *, BasicBlock *> {
+ explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
+
+ BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
+ BasicBlock *operator->() const { return operator*(); }
+ };
+
+ /// The const version of `succ_op_iterator`.
+ struct const_succ_op_iterator
+ : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
+ std::random_access_iterator_tag,
+ const BasicBlock *, ptrdiff_t, const BasicBlock *,
+ const BasicBlock *> {
+ explicit const_succ_op_iterator(const_value_op_iterator I)
+ : iterator_adaptor_base(I) {}
+
+ const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
+ const BasicBlock *operator->() const { return operator*(); }
+ };
+
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
Instruction *InsertBefore = nullptr) {
return new IndirectBrInst(Address, NumDests, InsertBefore);
@@ -3863,6 +3566,16 @@ public:
setOperand(i + 1, NewSucc);
}
+ iterator_range<succ_op_iterator> successors() {
+ return make_range(succ_op_iterator(std::next(value_op_begin())),
+ succ_op_iterator(value_op_end()));
+ }
+
+ iterator_range<const_succ_op_iterator> successors() const {
+ return make_range(const_succ_op_iterator(std::next(value_op_begin())),
+ const_succ_op_iterator(value_op_end()));
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::IndirectBr;
@@ -3885,48 +3598,43 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
-class InvokeInst : public CallBase<InvokeInst> {
- friend class OperandBundleUser<InvokeInst, User::op_iterator>;
+class InvokeInst : public CallBase {
+ /// The number of operands for this call beyond the called function,
+ /// arguments, and operand bundles.
+ static constexpr int NumExtraOperands = 2;
+
+ /// The index from the end of the operand array to the normal destination.
+ static constexpr int NormalDestOpEndIdx = -3;
+
+ /// The index from the end of the operand array to the unwind destination.
+ static constexpr int UnwindDestOpEndIdx = -2;
InvokeInst(const InvokeInst &BI);
/// Construct an InvokeInst given a range of arguments.
///
/// Construct an InvokeInst from a range of arguments
- inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
- unsigned Values, const Twine &NameStr,
- Instruction *InsertBefore)
- : InvokeInst(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
- InsertBefore) {}
-
inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ ArrayRef<OperandBundleDef> Bundles, int NumOperands,
const Twine &NameStr, Instruction *InsertBefore);
- /// Construct an InvokeInst given a range of arguments.
- ///
- /// Construct an InvokeInst from a range of arguments
- inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
- unsigned Values, const Twine &NameStr,
- BasicBlock *InsertAtEnd);
-
- void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
- const Twine &NameStr) {
- init(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, NameStr);
- }
+ inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, int NumOperands,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
- void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
+ void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+ /// Compute the number of operands to allocate.
+ static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
+ // We need one operand for the called function, plus our extra operands and
+ // the input operand counts provided.
+ return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
+ }
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -3934,69 +3642,125 @@ protected:
InvokeInst *cloneImpl() const;
public:
- static constexpr int ArgOffset = 3;
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, None, NameStr,
- InsertBefore);
+ int NumOperands = ComputeNumOperands(Args.size());
+ return new (NumOperands)
+ InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
+ NameStr, InsertBefore);
}
- static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
- return Create(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, NameStr,
- InsertBefore);
+ int NumOperands =
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
+ unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (NumOperands, DescriptorBytes)
+ InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
+ NameStr, InsertBefore);
+ }
+
+ static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ int NumOperands = ComputeNumOperands(Args.size());
+ return new (NumOperands)
+ InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
+ NameStr, InsertAtEnd);
}
static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ int NumOperands =
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
+ unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (NumOperands, DescriptorBytes)
+ InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
+ NameStr, InsertAtEnd);
+ }
+
+ static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
- unsigned Values = unsigned(Args.size()) + 3;
- return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
- Values, NameStr, InsertBefore);
+ return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+ None, NameStr, InsertBefore);
}
- static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
- unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
- unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+ return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+ Bundles, NameStr, InsertBefore);
+ }
- return new (Values, DescriptorBytes)
- InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
- NameStr, InsertBefore);
+ static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+ NameStr, InsertAtEnd);
}
- static InvokeInst *Create(Value *Func,
- BasicBlock *IfNormal, BasicBlock *IfException,
- ArrayRef<Value *> Args, const Twine &NameStr,
- BasicBlock *InsertAtEnd) {
- unsigned Values = unsigned(Args.size()) + 3;
- return new (Values) InvokeInst(Func, IfNormal, IfException, Args, None,
- Values, NameStr, InsertAtEnd);
+ static InvokeInst *Create(Function *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return Create(Func->getFunctionType(), Func, IfNormal, IfException, Args,
+ Bundles, NameStr, InsertAtEnd);
}
+ // Deprecated [opaque pointer types]
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ const Twine &NameStr,
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, None, NameStr,
+ InsertBefore);
+ }
+
+ // Deprecated [opaque pointer types]
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, NameStr,
+ InsertBefore);
+ }
+
+ // Deprecated [opaque pointer types]
static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
- unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, NameStr, InsertAtEnd);
+ }
- return new (Values, DescriptorBytes)
- InvokeInst(Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
- InsertAtEnd);
+ // Deprecated [opaque pointer types]
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, NameStr,
+ InsertAtEnd);
}
/// Create a clone of \p II with a different set of operand bundles and
@@ -4017,43 +3781,18 @@ public:
addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
}
- /// Return the function called, or null if this is an
- /// indirect function invocation.
- ///
- Function *getCalledFunction() const {
- return dyn_cast<Function>(Op<-3>());
- }
-
- /// Get a pointer to the function that is invoked by this
- /// instruction
- const Value *getCalledValue() const { return Op<-3>(); }
- Value *getCalledValue() { return Op<-3>(); }
-
- /// Set the function called.
- void setCalledFunction(Value* Fn) {
- setCalledFunction(
- cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
- Fn);
- }
- void setCalledFunction(FunctionType *FTy, Value *Fn) {
- this->FTy = FTy;
- assert(FTy == cast<FunctionType>(
- cast<PointerType>(Fn->getType())->getElementType()));
- Op<-3>() = Fn;
- }
-
// get*Dest - Return the destination basic blocks...
BasicBlock *getNormalDest() const {
- return cast<BasicBlock>(Op<-2>());
+ return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
}
BasicBlock *getUnwindDest() const {
- return cast<BasicBlock>(Op<-1>());
+ return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
}
void setNormalDest(BasicBlock *B) {
- Op<-2>() = reinterpret_cast<Value*>(B);
+ Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
}
void setUnwindDest(BasicBlock *B) {
- Op<-1>() = reinterpret_cast<Value*>(B);
+ Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
}
/// Get the landingpad instruction from the landing pad
@@ -4065,9 +3804,12 @@ public:
return i == 0 ? getNormalDest() : getUnwindDest();
}
- void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
- assert(idx < 2 && "Successor # out of range for invoke!");
- *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
+ void setSuccessor(unsigned i, BasicBlock *NewSucc) {
+ assert(i < 2 && "Successor # out of range for invoke!");
+ if (i == 0)
+ setNormalDest(NewSucc);
+ else
+ setUnwindDest(NewSucc);
}
unsigned getNumSuccessors() const { return 2; }
@@ -4089,36 +3831,26 @@ private:
}
};
-template <>
-struct OperandTraits<CallBase<InvokeInst>>
- : public VariadicOperandTraits<CallBase<InvokeInst>, 3> {};
-
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ ArrayRef<OperandBundleDef> Bundles, int NumOperands,
const Twine &NameStr, Instruction *InsertBefore)
- : CallBase<InvokeInst>(Ty->getReturnType(), Instruction::Invoke,
- OperandTraits<CallBase<InvokeInst>>::op_end(this) -
- Values,
- Values, InsertBefore) {
+ : CallBase(Ty->getReturnType(), Instruction::Invoke,
+ OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
+ InsertBefore) {
init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
-InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
+InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ ArrayRef<OperandBundleDef> Bundles, int NumOperands,
const Twine &NameStr, BasicBlock *InsertAtEnd)
- : CallBase<InvokeInst>(
- cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType())
- ->getReturnType(),
- Instruction::Invoke,
- OperandTraits<CallBase<InvokeInst>>::op_end(this) - Values, Values,
- InsertAtEnd) {
- init(Func, IfNormal, IfException, Args, Bundles, NameStr);
+ : CallBase(Ty->getReturnType(), Instruction::Invoke,
+ OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
+ InsertAtEnd) {
+ init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
-
//===----------------------------------------------------------------------===//
// ResumeInst Class
//===----------------------------------------------------------------------===//
@@ -4126,7 +3858,7 @@ InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
//===---------------------------------------------------------------------------
/// Resume the propagation of an exception.
///
-class ResumeInst : public TerminatorInst {
+class ResumeInst : public Instruction {
ResumeInst(const ResumeInst &RI);
explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
@@ -4164,8 +3896,6 @@ public:
}
private:
- friend TerminatorInst;
-
BasicBlock *getSuccessor(unsigned idx) const {
llvm_unreachable("ResumeInst has no successors!");
}
@@ -4185,7 +3915,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
//===----------------------------------------------------------------------===//
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
-class CatchSwitchInst : public TerminatorInst {
+class CatchSwitchInst : public Instruction {
/// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
@@ -4451,7 +4181,7 @@ public:
// CatchReturnInst Class
//===----------------------------------------------------------------------===//
-class CatchReturnInst : public TerminatorInst {
+class CatchReturnInst : public Instruction {
CatchReturnInst(const CatchReturnInst &RI);
CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
@@ -4511,8 +4241,6 @@ public:
}
private:
- friend TerminatorInst;
-
BasicBlock *getSuccessor(unsigned Idx) const {
assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
return getSuccessor();
@@ -4534,7 +4262,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
// CleanupReturnInst Class
//===----------------------------------------------------------------------===//
-class CleanupReturnInst : public TerminatorInst {
+class CleanupReturnInst : public Instruction {
private:
CleanupReturnInst(const CleanupReturnInst &RI);
CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
@@ -4607,8 +4335,6 @@ public:
}
private:
- friend TerminatorInst;
-
BasicBlock *getSuccessor(unsigned Idx) const {
assert(Idx == 0);
return getUnwindDest();
@@ -4641,7 +4367,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
-class UnreachableInst : public TerminatorInst {
+class UnreachableInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -4668,8 +4394,6 @@ public:
}
private:
- friend TerminatorInst;
-
BasicBlock *getSuccessor(unsigned idx) const {
llvm_unreachable("UnreachableInst has no successors!");
}
@@ -5248,6 +4972,25 @@ inline Value *getPointerOperand(Value *V) {
return nullptr;
}
+/// A helper function that returns the alignment of load or store instruction.
+inline unsigned getLoadStoreAlignment(Value *I) {
+ assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
+ "Expected Load or Store instruction");
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ return LI->getAlignment();
+ return cast<StoreInst>(I)->getAlignment();
+}
+
+/// A helper function that returns the address space of the pointer operand of
+/// load or store instruction.
+inline unsigned getLoadStoreAddressSpace(Value *I) {
+ assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
+ "Expected Load or Store instruction");
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ return LI->getPointerAddressSpace();
+ return cast<StoreInst>(I)->getPointerAddressSpace();
+}
+
} // end namespace llvm
#endif // LLVM_IR_INSTRUCTIONS_H
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicInst.h b/contrib/llvm/include/llvm/IR/IntrinsicInst.h
index 6650afcca7fb..80a7a7052574 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/contrib/llvm/include/llvm/IR/IntrinsicInst.h
@@ -66,6 +66,27 @@ namespace llvm {
/// This is the common base class for debug info intrinsics.
class DbgInfoIntrinsic : public IntrinsicInst {
public:
+ /// \name Casting methods
+ /// @{
+ static bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::dbg_addr:
+ case Intrinsic::dbg_label:
+ return true;
+ default: return false;
+ }
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ /// @}
+ };
+
+ /// This is the common base class for debug info intrinsics for variables.
+ class DbgVariableIntrinsic : public DbgInfoIntrinsic {
+ public:
/// Get the location corresponding to the variable referenced by the debug
/// info intrinsic. Depending on the intrinsic, this could be the
/// variable's value or its address.
@@ -104,7 +125,6 @@ namespace llvm {
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_addr:
- case Intrinsic::dbg_label:
return true;
default: return false;
}
@@ -116,7 +136,7 @@ namespace llvm {
};
/// This represents the llvm.dbg.declare instruction.
- class DbgDeclareInst : public DbgInfoIntrinsic {
+ class DbgDeclareInst : public DbgVariableIntrinsic {
public:
Value *getAddress() const { return getVariableLocation(); }
@@ -132,7 +152,7 @@ namespace llvm {
};
/// This represents the llvm.dbg.addr instruction.
- class DbgAddrIntrinsic : public DbgInfoIntrinsic {
+ class DbgAddrIntrinsic : public DbgVariableIntrinsic {
public:
Value *getAddress() const { return getVariableLocation(); }
@@ -147,7 +167,7 @@ namespace llvm {
};
/// This represents the llvm.dbg.value instruction.
- class DbgValueInst : public DbgInfoIntrinsic {
+ class DbgValueInst : public DbgVariableIntrinsic {
public:
Value *getValue() const {
return getVariableLocation(/* AllowNullOp = */ false);
@@ -168,17 +188,13 @@ namespace llvm {
class DbgLabelInst : public DbgInfoIntrinsic {
public:
DILabel *getLabel() const {
- return cast<DILabel>(getRawVariable());
+ return cast<DILabel>(getRawLabel());
}
- Metadata *getRawVariable() const {
+ Metadata *getRawLabel() const {
return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
}
- Metadata *getRawExpression() const {
- return nullptr;
- }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
/// @{
static bool classof(const IntrinsicInst *I) {
@@ -235,6 +251,12 @@ namespace llvm {
case Intrinsic::experimental_constrained_log2:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
+ case Intrinsic::experimental_constrained_maxnum:
+ case Intrinsic::experimental_constrained_minnum:
+ case Intrinsic::experimental_constrained_ceil:
+ case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_round:
+ case Intrinsic::experimental_constrained_trunc:
return true;
default: return false;
}
diff --git a/contrib/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm/include/llvm/IR/Intrinsics.td
index 0cec754dd649..64603d8ea030 100644
--- a/contrib/llvm/include/llvm/IR/Intrinsics.td
+++ b/contrib/llvm/include/llvm/IR/Intrinsics.td
@@ -90,6 +90,10 @@ class ReadNone<int argNo> : IntrinsicProperty {
def IntrNoReturn : IntrinsicProperty;
+// IntrCold - Calls to this intrinsic are cold.
+// Parallels the cold attribute on LLVM IR functions.
+def IntrCold : IntrinsicProperty;
+
// IntrNoduplicate - Calls to this intrinsic cannot be duplicated.
// Parallels the noduplicate attribute on LLVM IR functions.
def IntrNoDuplicate : IntrinsicProperty;
@@ -315,11 +319,84 @@ def int_gcwrite : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
[IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>;
+//===------------------- ObjC ARC runtime Intrinsics --------------------===//
+//
+// Note these are to support the Objective-C ARC optimizer which wants to
+// eliminate retain and releases where possible.
+
+def int_objc_autorelease : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_autoreleasePoolPop : Intrinsic<[], [llvm_ptr_ty]>;
+def int_objc_autoreleasePoolPush : Intrinsic<[llvm_ptr_ty], []>;
+def int_objc_autoreleaseReturnValue : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_copyWeak : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+def int_objc_destroyWeak : Intrinsic<[], [llvm_ptrptr_ty]>;
+def int_objc_initWeak : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptrptr_ty,
+ llvm_ptr_ty]>;
+def int_objc_loadWeak : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptrptr_ty]>;
+def int_objc_loadWeakRetained : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptrptr_ty]>;
+def int_objc_moveWeak : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+def int_objc_release : Intrinsic<[], [llvm_ptr_ty]>;
+def int_objc_retain : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retainAutorelease : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retainAutoreleaseReturnValue : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retainAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retainBlock : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_storeStrong : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptr_ty]>;
+def int_objc_storeWeak : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptrptr_ty,
+ llvm_ptr_ty]>;
+def int_objc_clang_arc_use : Intrinsic<[],
+ [llvm_vararg_ty]>;
+def int_objc_unsafeClaimAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retainedObject : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_unretainedObject : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_unretainedPointer : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_retain_autorelease : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty]>;
+def int_objc_sync_enter : Intrinsic<[llvm_i32_ty],
+ [llvm_ptr_ty]>;
+def int_objc_sync_exit : Intrinsic<[llvm_i32_ty],
+ [llvm_ptr_ty]>;
+def int_objc_arc_annotation_topdown_bbstart : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+def int_objc_arc_annotation_topdown_bbend : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+def int_objc_arc_annotation_bottomup_bbstart : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+def int_objc_arc_annotation_bottomup_bbend : Intrinsic<[],
+ [llvm_ptrptr_ty,
+ llvm_ptrptr_ty]>;
+
+
//===--------------------- Code Generator Intrinsics ----------------------===//
//
def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_sponentry : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
[IntrReadMem], "llvm.read_register">;
def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
@@ -337,6 +414,13 @@ def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;
def int_localrecover : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
[IntrNoMem]>;
+
+// Given the frame pointer passed into an SEH filter function, returns a
+// pointer to the local variable area suitable for use with llvm.localrecover.
+def int_eh_recoverfp : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_ptr_ty],
+ [IntrNoMem]>;
+
// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
// model their dependencies on allocas.
def int_stacksave : Intrinsic<[llvm_ptr_ty]>,
@@ -453,6 +537,14 @@ def int_maxnum : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable, Commutative]
>;
+def int_minimum : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable, Commutative]
+>;
+def int_maximum : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable, Commutative]
+>;
// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
@@ -557,9 +649,35 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_maxnum : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_minnum : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_ceil : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_floor : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_trunc : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
}
// FIXME: Add intrinsics for fcmp, fptrunc, fpext, fptoui and fptosi.
-// FIXME: Add intrinsics for fabs, copysign, floor, ceil, trunc and round?
+// FIXME: Add intrinsics for fabs and copysign?
//===------------------------- Expect Intrinsics --------------------------===//
@@ -700,6 +818,27 @@ def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
+//===------------------------- Saturation Arithmetic Intrinsics ---------------------===//
+//
+def int_sadd_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable, Commutative]>;
+def int_uadd_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable, Commutative]>;
+def int_ssub_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_usub_sat : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+
+//===------------------------- Fixed Point Arithmetic Intrinsics ---------------------===//
+//
+def int_smul_fix : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, Commutative]>;
+
//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start : Intrinsic<[],
@@ -817,7 +956,7 @@ def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
//
def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
GCCBuiltin<"__builtin_flt_rounds">;
-def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
+def int_trap : Intrinsic<[], [], [IntrNoReturn, IntrCold]>,
GCCBuiltin<"__builtin_trap">;
def int_debugtrap : Intrinsic<[]>,
GCCBuiltin<"__builtin_debugtrap">;
@@ -830,6 +969,10 @@ def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
[Throws]>;
+// Supports widenable conditions for guards represented as explicit branches.
+def int_experimental_widenable_condition : Intrinsic<[llvm_i1_ty], [],
+ [IntrInaccessibleMemOnly]>;
+
// NOP: calls/invokes to this intrinsic are removed by codegen
def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
@@ -850,6 +993,10 @@ def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
[], "llvm.clear_cache">;
+// Intrinsic to detect whether its argument is a constant.
+def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty], [IntrNoMem], "llvm.is.constant">;
+
+
//===-------------------------- Masked Intrinsics -------------------------===//
//
def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
@@ -1008,3 +1155,4 @@ include "llvm/IR/IntrinsicsAMDGPU.td"
include "llvm/IR/IntrinsicsBPF.td"
include "llvm/IR/IntrinsicsSystemZ.td"
include "llvm/IR/IntrinsicsWebAssembly.td"
+include "llvm/IR/IntrinsicsRISCV.td"
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td b/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 688e863c1afe..ff25750fe399 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -44,6 +44,12 @@ def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">, Intri
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">, Intrinsic<[], [llvm_i32_ty]>;
+// A space-consuming intrinsic primarily for testing block and jump table
+// placements. The first argument is the number of bytes this "instruction"
+// takes up, the second and return value are essentially chains, used to force
+// ordering during ISel.
+def int_aarch64_space : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;
+
}
//===----------------------------------------------------------------------===//
@@ -154,6 +160,11 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem]>;
+
+ class AdvSIMD_FP16FML_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
}
// Arithmetic ops
@@ -424,6 +435,12 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
// v8.2-A Dot Product
def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;
+
+ // v8.2-A FP16 Fused Multiply-Add Long
+ def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
+ def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
+ def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
+ def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;
}
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 9f361410b9b8..7913ce828fbc 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -590,7 +590,7 @@ class AMDGPUDimSampleProfile<string opmod,
AMDGPUDimProps dim,
AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
let IsSample = 1;
- let RetTypes = [llvm_anyfloat_ty];
+ let RetTypes = [llvm_any_ty];
let ExtraAddrArgs = sample.ExtraAddrArgs;
let Gradients = sample.Gradients;
let LodClampMip = sample.LodOrClamp;
@@ -683,11 +683,11 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
}
defm int_amdgcn_image_load
- : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_anyfloat_ty], [], [IntrReadMem],
+ : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
[SDNPMemOperand]>,
AMDGPUImageDMaskIntrinsic;
defm int_amdgcn_image_load_mip
- : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_anyfloat_ty], [],
+ : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
[IntrReadMem], [SDNPMemOperand], 1>,
AMDGPUImageDMaskIntrinsic;
@@ -802,6 +802,14 @@ class AMDGPUBufferLoad : Intrinsic <
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
+def int_amdgcn_s_buffer_load : Intrinsic <
+ [llvm_any_ty],
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // byte offset(SGPR/VGPR/imm)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc)
+ [IntrNoMem]>,
+ AMDGPURsrcIntrinsic<0>;
+
class AMDGPUBufferStore : Intrinsic <
[],
[llvm_anyfloat_ty, // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
@@ -815,6 +823,124 @@ class AMDGPUBufferStore : Intrinsic <
def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
+// New buffer intrinsics with separate raw and struct variants. The raw
+// variant never has an index. The struct variant always has an index, even if
+// it is const 0. A struct intrinsic with constant 0 index is different to the
+// corresponding raw intrinsic on gfx9+ because the behavior of bounds checking
+// and swizzling changes depending on whether idxen is set in the instruction.
+// These new intrinsics also keep the offset and soffset arguments separate as
+// they behave differently in bounds checking and swizzling.
+class AMDGPURawBufferLoad : Intrinsic <
+ [llvm_any_ty],
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrReadMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad;
+def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
+
+class AMDGPUStructBufferLoad : Intrinsic <
+ [llvm_any_ty],
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrReadMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
+def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
+
+class AMDGPURawBufferStore : Intrinsic <
+ [],
+ [llvm_any_ty, // vdata(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrWriteMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1>;
+def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore;
+def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
+
+class AMDGPUStructBufferStore : Intrinsic <
+ [],
+ [llvm_any_ty, // vdata(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrWriteMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1>;
+def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
+def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
+
+class AMDGPURawBufferAtomic : Intrinsic <
+ [llvm_anyint_ty],
+ [LLVMMatchType<0>, // vdata(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
+ [], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1, 0>;
+def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
+def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
+ [llvm_anyint_ty],
+ [LLVMMatchType<0>, // src(VGPR)
+ LLVMMatchType<0>, // cmp(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
+ [], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<2, 0>;
+
+class AMDGPUStructBufferAtomic : Intrinsic <
+ [llvm_anyint_ty],
+ [LLVMMatchType<0>, // vdata(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
+ [], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1, 0>;
+def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
+def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
+ [llvm_anyint_ty],
+ [LLVMMatchType<0>, // src(VGPR)
+ LLVMMatchType<0>, // cmp(VGPR)
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
+ [], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<2, 0>;
+
+// Obsolescent tbuffer intrinsics.
def int_amdgcn_tbuffer_load : Intrinsic <
[llvm_any_ty], // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
[llvm_v4i32_ty, // rsrc(SGPR)
@@ -844,6 +970,54 @@ def int_amdgcn_tbuffer_store : Intrinsic <
[IntrWriteMem], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<1>;
+// New tbuffer intrinsics, with:
+// - raw and struct variants
+// - joint format field
+// - joint cachepolicy field
+def int_amdgcn_raw_tbuffer_load : Intrinsic <
+ [llvm_any_ty], // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrReadMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+
+def int_amdgcn_raw_tbuffer_store : Intrinsic <
+ [],
+ [llvm_any_ty, // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrWriteMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1>;
+
+def int_amdgcn_struct_tbuffer_load : Intrinsic <
+ [llvm_any_ty], // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrReadMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+
+def int_amdgcn_struct_tbuffer_store : Intrinsic <
+ [],
+ [llvm_any_ty, // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
+ llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // vindex(VGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty, // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
+ llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
+ [IntrWriteMem], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<1>;
+
class AMDGPUBufferAtomic : Intrinsic <
[llvm_i32_ty],
[llvm_i32_ty, // vdata(VGPR)
@@ -1310,18 +1484,10 @@ def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
[llvm_i64_ty], [IntrConvergent]
>;
-def int_amdgcn_break : Intrinsic<[llvm_i64_ty],
- [llvm_i64_ty], [IntrNoMem, IntrConvergent]
->;
-
def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty],
[llvm_i1_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
>;
-def int_amdgcn_else_break : Intrinsic<[llvm_i64_ty],
- [llvm_i64_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
->;
-
def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
[llvm_i64_ty], [IntrConvergent]
>;
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td b/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td
index 25f4215d68a8..ecc69a679553 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td
@@ -15,7 +15,7 @@
//
// All Hexagon intrinsics start with "llvm.hexagon.".
let TargetPrefix = "hexagon" in {
- /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+ /// Hexagon_Intrinsic - Base class for the majority of Hexagon intrinsics.
class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
list<LLVMType> param_types,
list<IntrinsicProperty> properties>
@@ -30,397 +30,6 @@ let TargetPrefix = "hexagon" in {
: Intrinsic<ret_types, param_types, properties>;
}
-//===----------------------------------------------------------------------===//
-//
-// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
-// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_ptr_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
-// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i16_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
-// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
-// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
-// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
-// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
-// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
-// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
-// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
-// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
-// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
-// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
-// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
-// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
-// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
-// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
-// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
-// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
-// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_SIDI,BT_BOOL,BT_INT,BT_LONGLONG) ->
-// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_DISI,BT_BOOL,BT_LONGLONG,BT_INT) ->
-// Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
-// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
-// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_i1_ty, llvm_i1_ty, llvm_i1_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
-// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
-// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
-// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
-// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
-// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
-// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
-// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
-// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
-// BT_LONGLONG,BT_INT) ->
-// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
-// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
- llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
-// BT_LONGLONG) ->
-// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
-// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
-// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
-// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
-// BT_LONGLONG) ->
-// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
- llvm_i64_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
-// BT_BOOL) ->
-// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
-// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
-// BT_LONGLONG,BT_INT,BT_INT) ->
-// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-
class Hexagon_mem_memmemsi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
@@ -457,191 +66,6 @@ class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
llvm_i32_ty, llvm_i32_ty],
[IntrWriteMem]>;
-class Hexagon_v256_v256v256_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
- [IntrArgMemOnly]>;
-
-//
-// Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_i32_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_double_ty],
- [IntrNoMem]>;
-//
-// Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-//
-// Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_float_ty],
- [IntrNoMem]>;
-//
-// Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_float_ty],
- [IntrNoMem]>;
-//
-// Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty],
- [IntrNoMem]>;
-//
-// Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty],
- [IntrNoMem]>;
-//
-// Hexagon_si_df_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_df_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty],
- [IntrNoMem]>;
-//
-// Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty, llvm_float_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_float_ty, llvm_i32_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i1_ty], [llvm_float_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
- llvm_float_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
- llvm_float_ty,
- llvm_i32_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem]>;
-//
-// Hexagon_df_si_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_si_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_i32_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_df_di_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_di_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_i64_ty],
- [IntrNoMem]>;
-//
-// Hexagon_di_df_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_di_df_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_double_ty],
- [IntrNoMem]>;
-//
-// Hexagon_df_df_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_df_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_double_ty],
- [IntrNoMem]>;
-//
-// Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty, llvm_double_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_double_ty, llvm_i32_ty],
- [IntrNoMem, Throws]>;
-//
-//
-// Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
- llvm_double_ty],
- [IntrNoMem, Throws]>;
-//
-// Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
-//
-class Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
- llvm_double_ty,
- llvm_i32_ty],
- [IntrNoMem, Throws]>;
-
-
-// This one below will not be auto-generated,
-// so make sure, you don't overwrite this one.
//
// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
//
@@ -699,4204 +123,6 @@ Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
def int_hexagon_circ_stb :
Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
-
-def int_hexagon_mm256i_vaddw :
-Hexagon_v256_v256v256_Intrinsic<"_mm256i_vaddw">;
-
-
-// This one above will not be auto-generated,
-// so make sure, you don't overwrite this one.
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpeq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgt :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgt">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgtu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtu">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
-//
-def int_hexagon_C2_cmpeqp :
-Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpeqp">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
-//
-def int_hexagon_C2_cmpgtp :
-Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtp">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
-//
-def int_hexagon_C2_cmpgtup :
-Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtup">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpeqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpneqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpeq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_rcmpneq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneq">;
-//
-// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_bitsset :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsset">;
-//
-// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_bitsclr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclr">;
-//
-// BUILTIN_INFO(HEXAGON.C4_nbitsset,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_nbitsset :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsset">;
-//
-// BUILTIN_INFO(HEXAGON.C4_nbitsclr,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_nbitsclr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclr">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpeqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeqi">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgti :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgti">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgtui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtui">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgei :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgei">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpgeui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgeui">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmplt :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmplt">;
-//
-// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_cmpltu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpltu">;
-//
-// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
-//
-def int_hexagon_C2_bitsclri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclri">;
-//
-// BUILTIN_INFO(HEXAGON.C4_nbitsclri,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_nbitsclri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclri">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpneqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpltei :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpltei">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplteui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteui">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmpneq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneq">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplte :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplte">;
-//
-// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
-//
-def int_hexagon_C4_cmplteu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteu">;
-//
-// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
-//
-def int_hexagon_C2_and :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_and">;
-//
-// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
-//
-def int_hexagon_C2_or :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_or">;
-//
-// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
-//
-def int_hexagon_C2_xor :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_xor">;
-//
-// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
-//
-def int_hexagon_C2_andn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_andn">;
-//
-// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
-//
-def int_hexagon_C2_not :
-Hexagon_si_si_Intrinsic<"HEXAGON_C2_not">;
-//
-// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
-//
-def int_hexagon_C2_orn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_orn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_and">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_or">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_and">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_or">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_andn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_and_orn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_orn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
-//
-def int_hexagon_C4_or_orn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_orn">;
-//
-// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
-//
-def int_hexagon_C2_pxfer_map :
-Hexagon_si_qi_Intrinsic<"HEXAGON_C2_pxfer_map">;
-//
-// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
-//
-def int_hexagon_C2_any8 :
-Hexagon_si_qi_Intrinsic<"HEXAGON_C2_any8">;
-//
-// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
-//
-def int_hexagon_C2_all8 :
-Hexagon_si_qi_Intrinsic<"HEXAGON_C2_all8">;
-//
-// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
-//
-def int_hexagon_C2_vitpack :
-Hexagon_si_qiqi_Intrinsic<"HEXAGON_C2_vitpack">;
-//
-// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
-//
-def int_hexagon_C2_mux :
-Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_mux">;
-//
-// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
-//
-def int_hexagon_C2_muxii :
-Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxii">;
-//
-// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
-//
-def int_hexagon_C2_muxir :
-Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxir">;
-//
-// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
-//
-def int_hexagon_C2_muxri :
-Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxri">;
-//
-// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
-//
-def int_hexagon_C2_vmux :
-Hexagon_di_qididi_Intrinsic<"HEXAGON_C2_vmux">;
-//
-// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
-//
-def int_hexagon_C2_mask :
-Hexagon_di_qi_Intrinsic<"HEXAGON_C2_mask">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpbeq :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbeq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpbeqi,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpbeqi :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbeqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpbeq_any,QI_ftype_DIDI,2)
-//
-def int_hexagon_A4_vcmpbeq_any :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpbgtu :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpbgtui,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpbgtui :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgtui">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpbgt,QI_ftype_DIDI,2)
-//
-def int_hexagon_A4_vcmpbgt :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbgt">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpbgti,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpbgti :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgti">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbeq,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbeq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbeqi,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbeqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbgtu,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbgtu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbgtui,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbgtui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtui">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbgt,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbgt :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgt">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpbgti,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpbgti :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgti">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpheq :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpheq">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmphgt :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgt">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmphgtu :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgtu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpheqi,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpheqi :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpheqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmphgti,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmphgti :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgti">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmphgtui,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmphgtui :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgtui">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpheq,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpheq :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheq">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmphgt,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmphgt :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgt">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmphgtu,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmphgtu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmpheqi,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmpheqi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmphgti,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmphgti :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgti">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cmphgtui,QI_ftype_SISI,2)
-//
-def int_hexagon_A4_cmphgtui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtui">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpweq :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpweq">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpwgt :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgt">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vcmpwgtu :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpweqi,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpweqi :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpweqi">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpwgti,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpwgti :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgti">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vcmpwgtui,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_vcmpwgtui :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgtui">;
-//
-// BUILTIN_INFO(HEXAGON.A4_boundscheck,QI_ftype_SIDI,2)
-//
-def int_hexagon_A4_boundscheck :
-Hexagon_si_sidi_Intrinsic<"HEXAGON_A4_boundscheck">;
-//
-// BUILTIN_INFO(HEXAGON.A4_tlbmatch,QI_ftype_DISI,2)
-//
-def int_hexagon_A4_tlbmatch :
-Hexagon_si_disi_Intrinsic<"HEXAGON_A4_tlbmatch">;
-//
-// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
-//
-def int_hexagon_C2_tfrpr :
-Hexagon_si_qi_Intrinsic<"HEXAGON_C2_tfrpr">;
-//
-// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
-//
-def int_hexagon_C2_tfrrp :
-Hexagon_si_si_Intrinsic<"HEXAGON_C2_tfrrp">;
-//
-// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
-//
-def int_hexagon_C4_fastcorner9 :
-Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9">;
-//
-// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
-//
-def int_hexagon_C4_fastcorner9_not :
-Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_acc_sat_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpy_nac_sat_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_rnd_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_acc_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyd_nac_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_hh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_hh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_hl_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_hl_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_lh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_lh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_ll_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_ll_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_hh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_hh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_hl_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_hl_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_lh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_lh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_ll_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyd_rnd_ll_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_acc_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_hh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_hh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_hl_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_hl_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_lh_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_lh_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_ll_s0 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_mpyu_nac_ll_s1 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_hh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_hh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_hl_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_hl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_lh_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_lh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_ll_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_ll_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_acc_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_hh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_hh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_hl_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_hl_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_lh_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_lh_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_ll_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_mpyud_nac_ll_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_hh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_hh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_hl_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_hl_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_lh_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_lh_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_ll_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyud_ll_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpysmi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysmi">;
-//
-// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_macsip :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsip">;
-//
-// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_macsin :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsin">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_dpmpyss_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_dpmpyss_acc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_dpmpyss_nac_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
-//
-def int_hexagon_M2_dpmpyuu_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_dpmpyuu_acc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_dpmpyuu_nac_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_up :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_up_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpy_up_s1_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyu_up :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_up">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpysu_up,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpysu_up :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysu_up">;
-//
-// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_dpmpyss_rnd_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mac_up_s1_sat,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mac_up_s1_sat :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
-//
-// BUILTIN_INFO(HEXAGON.M4_nac_up_s1_sat,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_nac_up_s1_sat :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyi">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_mpyui :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyui">;
-//
-// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_maci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_maci">;
-//
-// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_acci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_acci">;
-//
-// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_accii :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_accii">;
-//
-// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_nacci :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_nacci">;
-//
-// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_naccii :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_naccii">;
-//
-// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_subacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_subacc">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mpyrr_addr,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mpyrr_addr :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mpyri_addr_u2,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mpyri_addr_u2 :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr_u2">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mpyri_addr,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mpyri_addr :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mpyri_addi,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mpyri_addi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addi">;
-//
-// BUILTIN_INFO(HEXAGON.M4_mpyrr_addi,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_mpyrr_addi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addi">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2s_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2s_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_vmac2s_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_vmac2s_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2su_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2su_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2su_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_vmac2su_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2su_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_vmac2su_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2s_s0pack :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_vmpy2s_s1pack :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_vmac2 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vmpy2es_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vmpy2es_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vmac2es_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vmac2es_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vmac2es :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vrmac_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrmac_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vrmpy_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vdmpyrs_s0 :
-Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vdmpyrs_s1 :
-Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vrmpybuu,DI_ftype_DIDI,2)
-//
-def int_hexagon_M5_vrmpybuu :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybuu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vrmacbuu,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M5_vrmacbuu :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbuu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vrmpybsu,DI_ftype_DIDI,2)
-//
-def int_hexagon_M5_vrmpybsu :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybsu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vrmacbsu,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M5_vrmacbsu :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbsu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vmpybuu,DI_ftype_SISI,2)
-//
-def int_hexagon_M5_vmpybuu :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybuu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vmpybsu,DI_ftype_SISI,2)
-//
-def int_hexagon_M5_vmpybsu :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybsu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vmacbuu,DI_ftype_DISISI,3)
-//
-def int_hexagon_M5_vmacbuu :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbuu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vmacbsu,DI_ftype_DISISI,3)
-//
-def int_hexagon_M5_vmacbsu :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbsu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vdmpybsu,DI_ftype_DIDI,2)
-//
-def int_hexagon_M5_vdmpybsu :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vdmpybsu">;
-//
-// BUILTIN_INFO(HEXAGON.M5_vdmacbsu,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M5_vdmacbsu :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vdmacbsu">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vdmacs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vdmacs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vdmpys_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vdmpys_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyrs_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyrs_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyrsc_s0 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyrsc_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmacs_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmacs_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmacsc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmacsc_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpys_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpys_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpysc_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpysc_s1 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cnacs_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cnacs_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cnacsc_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cnacsc_s1 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
-//
-def int_hexagon_M2_vrcmpys_s1 :
-Hexagon_di_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
-//
-def int_hexagon_M2_vrcmpys_acc_s1 :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
-//
-def int_hexagon_M2_vrcmpys_s1rp :
-Hexagon_si_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacls_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacls_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmachs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmachs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyl_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyl_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacls_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacls_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmachs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmachs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyl_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyl_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyh_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyh_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M4_vrmpyeh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M4_vrmpyeh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_vrmpyeh_acc_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_vrmpyeh_acc_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M4_vrmpyoh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M4_vrmpyoh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_vrmpyoh_acc_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_vrmpyoh_acc_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_hmmpyl_rs1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_hmmpyh_rs1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_hmmpyl_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_hmmpyl_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_hmmpyh_s1,SI_ftype_SISI,2)
-//
-def int_hexagon_M2_hmmpyh_s1 :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmaculs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmaculs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacuhs_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacuhs_s1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyul_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyul_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyuh_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyuh_s1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmaculs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmaculs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacuhs_rs0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_mmacuhs_rs1 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyul_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyul_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyuh_rs0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_mmpyuh_rs1 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vrcmaci_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vrcmacr_s0 :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vrcmaci_s0c :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vrcmacr_s0c :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmaci_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmaci_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
-//
-def int_hexagon_M2_cmacr_s0 :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vrcmpyi_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vrcmpyr_s0 :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vrcmpyi_s0c :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vrcmpyr_s0c :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyi_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
-//
-def int_hexagon_M2_cmpyr_s0 :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.M4_cmpyi_wh,SI_ftype_DISI,2)
-//
-def int_hexagon_M4_cmpyi_wh :
-Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
-//
-// BUILTIN_INFO(HEXAGON.M4_cmpyr_wh,SI_ftype_DISI,2)
-//
-def int_hexagon_M4_cmpyr_wh :
-Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
-//
-// BUILTIN_INFO(HEXAGON.M4_cmpyi_whc,SI_ftype_DISI,2)
-//
-def int_hexagon_M4_cmpyi_whc :
-Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
-//
-// BUILTIN_INFO(HEXAGON.M4_cmpyr_whc,SI_ftype_DISI,2)
-//
-def int_hexagon_M4_cmpyr_whc :
-Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vcmpy_s0_sat_i :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vcmpy_s0_sat_r :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vcmpy_s1_sat_i :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vcmpy_s1_sat_r :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vcmac_s0_sat_i :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M2_vcmac_s0_sat_r :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_vcrotate :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcrotate">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vrcrotate_acc,DI_ftype_DIDISISI,4)
-//
-def int_hexagon_S4_vrcrotate_acc :
-Hexagon_di_didisisi_Intrinsic<"HEXAGON_S4_vrcrotate_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vrcrotate,DI_ftype_DISISI,3)
-//
-def int_hexagon_S4_vrcrotate :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_vrcrotate">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vcnegh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_vcnegh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcnegh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vrcnegh,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_vrcnegh :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vrcnegh">;
-//
-// BUILTIN_INFO(HEXAGON.M4_pmpyw,DI_ftype_SISI,2)
-//
-def int_hexagon_M4_pmpyw :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_pmpyw">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vpmpyh,DI_ftype_SISI,2)
-//
-def int_hexagon_M4_vpmpyh :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_vpmpyh">;
-//
-// BUILTIN_INFO(HEXAGON.M4_pmpyw_acc,DI_ftype_DISISI,3)
-//
-def int_hexagon_M4_pmpyw_acc :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
-//
-// BUILTIN_INFO(HEXAGON.M4_vpmpyh_acc,DI_ftype_DISISI,3)
-//
-def int_hexagon_M4_vpmpyh_acc :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_add :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_sub :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_sub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addsat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subsat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addi">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_l16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_l16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_l16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_l16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_l16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_l16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_l16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_l16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_sat_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_addh_h16_sat_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_sat_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_sat_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_sat_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subh_h16_sat_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
-//
-def int_hexagon_A2_aslh :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_aslh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
-//
-def int_hexagon_A2_asrh :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_asrh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_addp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_addpsat :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addpsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
-//
-def int_hexagon_A2_addsp :
-Hexagon_di_sidi_Intrinsic<"HEXAGON_A2_addsp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_subp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_subp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
-//
-def int_hexagon_A2_neg :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_neg">;
-//
-// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
-//
-def int_hexagon_A2_negsat :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_negsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
-//
-def int_hexagon_A2_abs :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_abs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
-//
-def int_hexagon_A2_abssat :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_abssat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
-//
-def int_hexagon_A2_vconj :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_vconj">;
-//
-// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
-//
-def int_hexagon_A2_negp :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_negp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
-//
-def int_hexagon_A2_absp :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_absp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_max :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_max">;
-//
-// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
-//
-def int_hexagon_A2_maxu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_maxu">;
-//
-// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_min :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_min">;
-//
-// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
-//
-def int_hexagon_A2_minu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_minu">;
-//
-// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_maxp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
-//
-def int_hexagon_A2_maxup :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxup">;
-//
-// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_minp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
-//
-def int_hexagon_A2_minup :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minup">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
-//
-def int_hexagon_A2_tfr :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
-//
-def int_hexagon_A2_tfrsi :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfrsi">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
-//
-def int_hexagon_A2_tfrp :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_tfrp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
-//
-def int_hexagon_A2_tfrpi :
-Hexagon_di_si_Intrinsic<"HEXAGON_A2_tfrpi">;
-//
-// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
-//
-def int_hexagon_A2_zxtb :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxtb">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
-//
-def int_hexagon_A2_sxtb :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxtb">;
-//
-// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
-//
-def int_hexagon_A2_zxth :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxth">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
-//
-def int_hexagon_A2_sxth :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxth">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
-//
-def int_hexagon_A2_combinew :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combinew">;
-//
-// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
-//
-def int_hexagon_A4_combineri :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineri">;
-//
-// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
-//
-def int_hexagon_A4_combineir :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineir">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
-//
-def int_hexagon_A2_combineii :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combineii">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_combine_hh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_combine_hl :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_combine_lh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_lh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_combine_ll :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_ll">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_tfril :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfril">;
-//
-// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_tfrih :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfrih">;
-//
-// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_and :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_and">;
-//
-// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_or :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_or">;
-//
-// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_xor :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_xor">;
-//
-// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
-//
-def int_hexagon_A2_not :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_not">;
-//
-// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
-//
-def int_hexagon_M2_xor_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_xor_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_M4_xor_xacc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_xor_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_andn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_andn">;
-//
-// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_orn :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_orn">;
-//
-// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A4_andnp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A4_andnp">;
-//
-// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A4_ornp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A4_ornp">;
-//
-// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_addaddi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addaddi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_subaddi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subaddi">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_and">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_andn">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_and_xor :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_xor">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_and">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_or_xor :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_xor">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_andix :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andix">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_andi :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_or_ori :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_ori">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_and">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_or">;
-//
-// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
-//
-def int_hexagon_M4_xor_andn :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_andn">;
-//
-// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_subri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subri">;
-//
-// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_andir :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_andir">;
-//
-// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_orir :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_orir">;
-//
-// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_andp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_andp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_orp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_orp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_xorp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_xorp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
-//
-def int_hexagon_A2_notp :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_notp">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
-//
-def int_hexagon_A2_sxtw :
-Hexagon_di_si_Intrinsic<"HEXAGON_A2_sxtw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
-//
-def int_hexagon_A2_sat :
-Hexagon_si_di_Intrinsic<"HEXAGON_A2_sat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_roundsat,SI_ftype_DI,1)
-//
-def int_hexagon_A2_roundsat :
-Hexagon_si_di_Intrinsic<"HEXAGON_A2_roundsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
-//
-def int_hexagon_A2_sath :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_sath">;
-//
-// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
-//
-def int_hexagon_A2_satuh :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_satuh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
-//
-def int_hexagon_A2_satub :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_satub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
-//
-def int_hexagon_A2_satb :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_satb">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddb_map,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddb_map :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddb_map">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddubs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddubs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vadduhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vadduhs">;
-//
-// BUILTIN_INFO(HEXAGON.A5_vaddhubs,SI_ftype_DIDI,2)
-//
-def int_hexagon_A5_vaddhubs :
-Hexagon_si_didi_Intrinsic<"HEXAGON_A5_vaddhubs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vaddws :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddws">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxaddsubw,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxaddsubw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubw">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxsubaddw,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxsubaddw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddw">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxaddsubh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxaddsubh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubh">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxsubaddh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxsubaddh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddh">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxaddsubhr,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxaddsubhr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
-//
-// BUILTIN_INFO(HEXAGON.S4_vxsubaddhr,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_vxsubaddhr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svavgh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavgh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svavghs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavghs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svnavgh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svnavgh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svaddh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svaddhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svadduhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svadduhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svsubh :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svsubhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
-//
-def int_hexagon_A2_svsubuhs :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubuhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vraddub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vraddub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_A2_vraddub_acc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vraddub_acc">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vraddh,SI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vraddh :
-Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vraddh">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vradduh :
-Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vradduh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubb_map,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubb_map :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubb_map">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsububs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsububs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubuhs :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubuhs">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vsubws :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubws">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
-//
-def int_hexagon_A2_vabsh :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
-//
-def int_hexagon_A2_vabshsat :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabshsat">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
-//
-def int_hexagon_A2_vabsw :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
-//
-def int_hexagon_A2_vabswsat :
-Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabswsat">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vabsdiffw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffw">;
-//
-// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
-//
-def int_hexagon_M2_vabsdiffh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vrsadub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vrsadub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_A2_vrsadub_acc :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavguh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavgh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavgw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavgwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgwcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwcr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavgwcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwcr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavghcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghcr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavghcr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghcr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavguw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavguwr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguwr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavgubr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgubr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavguhr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguhr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vavghr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghr">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vnavghr :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghr">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_ri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_rr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_ri_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri_sat">;
-//
-// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_round_rr_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr_sat">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_cround_ri :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_ri">;
-//
-// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_cround_rr :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_rr">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrminh,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrminh :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminh">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrmaxh,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrmaxh :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxh">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrminuh,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrminuh :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuh">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrmaxuh,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrmaxuh :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuh">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrminw,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrminw :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminw">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrmaxw,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrmaxw :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxw">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrminuw,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrminuw :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuw">;
-//
-// BUILTIN_INFO(HEXAGON.A4_vrmaxuw,DI_ftype_DIDISI,3)
-//
-def int_hexagon_A4_vrmaxuw :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminb,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminb :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminb">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxb,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxb :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxb">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxub :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxub">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminuh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxuh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuh">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vminuw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuw">;
-//
-// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
-//
-def int_hexagon_A2_vmaxuw :
-Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuw">;
-//
-// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
-//
-def int_hexagon_A4_modwrapu :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_modwrapu">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfadd,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfadd :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfadd">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfsub,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfsub :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfsub">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfmpy,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfmpy :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmpy">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffma,SF_ftype_SFSFSF,3)
-//
-def int_hexagon_F2_sffma :
-Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffma_sc,SF_ftype_SFSFSFQI,4)
-//
-def int_hexagon_F2_sffma_sc :
-Hexagon_sf_sfsfsfqi_Intrinsic<"HEXAGON_F2_sffma_sc">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffms,SF_ftype_SFSFSF,3)
-//
-def int_hexagon_F2_sffms :
-Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffma_lib,SF_ftype_SFSFSF,3)
-//
-def int_hexagon_F2_sffma_lib :
-Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma_lib">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffms_lib,SF_ftype_SFSFSF,3)
-//
-def int_hexagon_F2_sffms_lib :
-Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms_lib">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfcmpeq,QI_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfcmpeq :
-Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfcmpgt,QI_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfcmpgt :
-Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpgt">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfcmpge,QI_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfcmpge :
-Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpge">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfcmpuo,QI_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfcmpuo :
-Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpuo">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfmax,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfmax :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmax">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfmin,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sfmin :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmin">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfclass,QI_ftype_SFSI,2)
-//
-def int_hexagon_F2_sfclass :
-Hexagon_si_sfsi_Intrinsic<"HEXAGON_F2_sfclass">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfimm_p,SF_ftype_SI,1)
-//
-def int_hexagon_F2_sfimm_p :
-Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_p">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sfimm_n,SF_ftype_SI,1)
-//
-def int_hexagon_F2_sfimm_n :
-Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_n">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffixupn,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sffixupn :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupn">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffixupd,SF_ftype_SFSF,2)
-//
-def int_hexagon_F2_sffixupd :
-Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupd">;
-//
-// BUILTIN_INFO(HEXAGON.F2_sffixupr,SF_ftype_SF,1)
-//
-def int_hexagon_F2_sffixupr :
-Hexagon_sf_sf_Intrinsic<"HEXAGON_F2_sffixupr">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfcmpeq,QI_ftype_DFDF,2)
-//
-def int_hexagon_F2_dfcmpeq :
-Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfcmpgt,QI_ftype_DFDF,2)
-//
-def int_hexagon_F2_dfcmpgt :
-Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpgt">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfcmpge,QI_ftype_DFDF,2)
-//
-def int_hexagon_F2_dfcmpge :
-Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpge">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfcmpuo,QI_ftype_DFDF,2)
-//
-def int_hexagon_F2_dfcmpuo :
-Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpuo">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfclass,QI_ftype_DFSI,2)
-//
-def int_hexagon_F2_dfclass :
-Hexagon_si_dfsi_Intrinsic<"HEXAGON_F2_dfclass">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfimm_p,DF_ftype_SI,1)
-//
-def int_hexagon_F2_dfimm_p :
-Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_p">;
-//
-// BUILTIN_INFO(HEXAGON.F2_dfimm_n,DF_ftype_SI,1)
-//
-def int_hexagon_F2_dfimm_n :
-Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_n">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2df,DF_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2df :
-Hexagon_df_sf_Intrinsic<"HEXAGON_F2_conv_sf2df">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2sf,SF_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2sf :
-Hexagon_sf_df_Intrinsic<"HEXAGON_F2_conv_df2sf">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_uw2sf,SF_ftype_SI,1)
-//
-def int_hexagon_F2_conv_uw2sf :
-Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_uw2df,DF_ftype_SI,1)
-//
-def int_hexagon_F2_conv_uw2df :
-Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_uw2df">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_w2sf,SF_ftype_SI,1)
-//
-def int_hexagon_F2_conv_w2sf :
-Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_w2sf">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_w2df,DF_ftype_SI,1)
-//
-def int_hexagon_F2_conv_w2df :
-Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_w2df">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_ud2sf,SF_ftype_DI,1)
-//
-def int_hexagon_F2_conv_ud2sf :
-Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_ud2df,DF_ftype_DI,1)
-//
-def int_hexagon_F2_conv_ud2df :
-Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_ud2df">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_d2sf,SF_ftype_DI,1)
-//
-def int_hexagon_F2_conv_d2sf :
-Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_d2sf">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_d2df,DF_ftype_DI,1)
-//
-def int_hexagon_F2_conv_d2df :
-Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_d2df">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw,SI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2uw :
-Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2w,SI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2w :
-Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud,DI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2ud :
-Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2d,DI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2d :
-Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2uw,SI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2uw :
-Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2w,SI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2w :
-Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2ud,DI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2ud :
-Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2d,DI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2d :
-Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw_chop,SI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2uw_chop :
-Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2w_chop,SI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2w_chop :
-Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud_chop,DI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2ud_chop :
-Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_sf2d_chop,DI_ftype_SF,1)
-//
-def int_hexagon_F2_conv_sf2d_chop :
-Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2uw_chop,SI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2uw_chop :
-Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2w_chop,SI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2w_chop :
-Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2ud_chop,DI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2ud_chop :
-Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
-//
-// BUILTIN_INFO(HEXAGON.F2_conv_df2d_chop,DI_ftype_DF,1)
-//
-def int_hexagon_F2_conv_df2d_chop :
-Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asr_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asl_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_lsr_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_r_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_lsl_r_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsl_r_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsl_r_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsl_r_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsl_r_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsl_r_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsl_r_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsl_r_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsl_r_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsl_r_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsl_r_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_p_xor,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_r_p_xor :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_p_xor,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_r_p_xor :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_xor,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_r_p_xor :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_xor,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsl_r_p_xor :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asr_r_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asl_r_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asr_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_lsr_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asl_i_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_i_p :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_i_r_acc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_i_p_acc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_i_r_nac :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_i_p_nac :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_i_r_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_i_r_xacc :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_i_p_xacc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_i_p_xacc :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_i_r_and :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asr_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_lsr_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_asl_i_r_or :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_i_p_and :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asr_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_lsr_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_asl_i_p_or :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asl_i_r_sat :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r_sat">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asr_i_r_rnd :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_p_rnd :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd_goodsyntax,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S4_lsli,SI_ftype_SISI,2)
-//
-def int_hexagon_S4_lsli :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_lsli">;
-//
-// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_addasl_rrri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_addasl_rrri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_andi_asl_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_andi_asl_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_ori_asl_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_ori_asl_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_addi_asl_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_addi_asl_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_subi_asl_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_subi_asl_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_andi_lsr_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_andi_lsr_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_ori_lsr_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_ori_lsr_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_addi_lsr_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_addi_lsr_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S4_subi_lsr_ri,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_subi_lsr_ri :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_valignib :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_valignib">;
-//
-// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
-//
-def int_hexagon_S2_valignrb :
-Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_valignrb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
-//
-def int_hexagon_S2_vspliceib :
-Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vspliceib">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
-//
-def int_hexagon_S2_vsplicerb :
-Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_vsplicerb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
-//
-def int_hexagon_S2_vsplatrh :
-Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsplatrh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
-//
-def int_hexagon_S2_vsplatrb :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_vsplatrb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
-//
-def int_hexagon_S2_insert :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_insert">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
-//
-def int_hexagon_S2_tableidxb_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
-//
-def int_hexagon_S2_tableidxh_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
-//
-def int_hexagon_S2_tableidxw_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
-//
-def int_hexagon_S2_tableidxd_goodsyntax :
-Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.A4_bitspliti,DI_ftype_SISI,2)
-//
-def int_hexagon_A4_bitspliti :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitspliti">;
-//
-// BUILTIN_INFO(HEXAGON.A4_bitsplit,DI_ftype_SISI,2)
-//
-def int_hexagon_A4_bitsplit :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitsplit">;
-//
-// BUILTIN_INFO(HEXAGON.S4_extract,SI_ftype_SISISI,3)
-//
-def int_hexagon_S4_extract :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_extract">;
-//
-// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
-//
-def int_hexagon_S2_extractu :
-Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_extractu">;
-//
-// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
-//
-def int_hexagon_S2_insertp :
-Hexagon_di_didisisi_Intrinsic<"HEXAGON_S2_insertp">;
-//
-// BUILTIN_INFO(HEXAGON.S4_extractp,DI_ftype_DISISI,3)
-//
-def int_hexagon_S4_extractp :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_extractp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
-//
-def int_hexagon_S2_extractup :
-Hexagon_di_disisi_Intrinsic<"HEXAGON_S2_extractup">;
-//
-// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
-//
-def int_hexagon_S2_insert_rp :
-Hexagon_si_sisidi_Intrinsic<"HEXAGON_S2_insert_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S4_extract_rp,SI_ftype_SIDI,2)
-//
-def int_hexagon_S4_extract_rp :
-Hexagon_si_sidi_Intrinsic<"HEXAGON_S4_extract_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
-//
-def int_hexagon_S2_extractu_rp :
-Hexagon_si_sidi_Intrinsic<"HEXAGON_S2_extractu_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
-//
-def int_hexagon_S2_insertp_rp :
-Hexagon_di_dididi_Intrinsic<"HEXAGON_S2_insertp_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S4_extractp_rp,DI_ftype_DIDI,2)
-//
-def int_hexagon_S4_extractp_rp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S4_extractp_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_extractup_rp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_extractup_rp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
-//
-def int_hexagon_S2_tstbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.S4_ntstbit_i,QI_ftype_SISI,2)
-//
-def int_hexagon_S4_ntstbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_setbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_togglebit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_i">;
-//
-// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_clrbit_i :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
-//
-def int_hexagon_S2_tstbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.S4_ntstbit_r,QI_ftype_SISI,2)
-//
-def int_hexagon_S4_ntstbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_setbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_togglebit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
-//
-def int_hexagon_S2_clrbit_r :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_i_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S5_asrhub_rnd_sat_goodsyntax,SI_ftype_DISI,2)
-//
-def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
-Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S5_asrhub_sat,SI_ftype_DISI,2)
-//
-def int_hexagon_S5_asrhub_sat :
-Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_sat">;
-//
-// BUILTIN_INFO(HEXAGON.S5_vasrhrnd_goodsyntax,DI_ftype_DISI,2)
-//
-def int_hexagon_S5_vasrhrnd_goodsyntax :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsl_r_vh :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_i_svw_trun :
-Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_i_svw_trun">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_r_svw_trun :
-Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_i_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asr_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_asl_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsr_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
-//
-def int_hexagon_S2_lsl_r_vw :
-Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vrndpackwh :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vrndpackwhs :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
-//
-def int_hexagon_S2_vsxtbh :
-Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxtbh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
-//
-def int_hexagon_S2_vzxtbh :
-Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxtbh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vsathub :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathub">;
-//
-// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
-//
-def int_hexagon_S2_svsathub :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathub">;
-//
-// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
-//
-def int_hexagon_S2_svsathb :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vsathb :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vtrunohb :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunohb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_vtrunewh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunewh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_vtrunowh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunowh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vtrunehb :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunehb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
-//
-def int_hexagon_S2_vsxthw :
-Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxthw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
-//
-def int_hexagon_S2_vzxthw :
-Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxthw">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vsatwh :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
-//
-def int_hexagon_S2_vsatwuh :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwuh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
-//
-def int_hexagon_S2_packhl :
-Hexagon_di_sisi_Intrinsic<"HEXAGON_S2_packhl">;
-//
-// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
-//
-def int_hexagon_A2_swiz :
-Hexagon_si_si_Intrinsic<"HEXAGON_A2_swiz">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
-//
-def int_hexagon_S2_vsathub_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
-//
-def int_hexagon_S2_vsathb_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
-//
-def int_hexagon_S2_vsatwh_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
-//
-def int_hexagon_S2_vsatwuh_nopack :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_shuffob :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffob">;
-//
-// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_shuffeb :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_shuffoh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffoh">;
-//
-// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_shuffeh :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeh">;
-//
-// BUILTIN_INFO(HEXAGON.S5_popcountp,SI_ftype_DI,1)
-//
-def int_hexagon_S5_popcountp :
-Hexagon_si_di_Intrinsic<"HEXAGON_S5_popcountp">;
-//
-// BUILTIN_INFO(HEXAGON.S4_parity,SI_ftype_SISI,2)
-//
-def int_hexagon_S4_parity :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_parity">;
-//
-// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
-//
-def int_hexagon_S2_parityp :
-Hexagon_si_didi_Intrinsic<"HEXAGON_S2_parityp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
-//
-def int_hexagon_S2_lfsp :
-Hexagon_di_didi_Intrinsic<"HEXAGON_S2_lfsp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
-//
-def int_hexagon_S2_clbnorm :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_clbnorm">;
-//
-// BUILTIN_INFO(HEXAGON.S4_clbaddi,SI_ftype_SISI,2)
-//
-def int_hexagon_S4_clbaddi :
-Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_clbaddi">;
-//
-// BUILTIN_INFO(HEXAGON.S4_clbpnorm,SI_ftype_DI,1)
-//
-def int_hexagon_S4_clbpnorm :
-Hexagon_si_di_Intrinsic<"HEXAGON_S4_clbpnorm">;
-//
-// BUILTIN_INFO(HEXAGON.S4_clbpaddi,SI_ftype_DISI,2)
-//
-def int_hexagon_S4_clbpaddi :
-Hexagon_si_disi_Intrinsic<"HEXAGON_S4_clbpaddi">;
-//
-// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
-//
-def int_hexagon_S2_clb :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_clb">;
-//
-// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
-//
-def int_hexagon_S2_cl0 :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl0">;
-//
-// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
-//
-def int_hexagon_S2_cl1 :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl1">;
-//
-// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
-//
-def int_hexagon_S2_clbp :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_clbp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
-//
-def int_hexagon_S2_cl0p :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl0p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
-//
-def int_hexagon_S2_cl1p :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl1p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
-//
-def int_hexagon_S2_brev :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_brev">;
-//
-// BUILTIN_INFO(HEXAGON.S2_brevp,DI_ftype_DI,1)
-//
-def int_hexagon_S2_brevp :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_brevp">;
-//
-// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
-//
-def int_hexagon_S2_ct0 :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct0">;
-//
-// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
-//
-def int_hexagon_S2_ct1 :
-Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct1">;
-//
-// BUILTIN_INFO(HEXAGON.S2_ct0p,SI_ftype_DI,1)
-//
-def int_hexagon_S2_ct0p :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct0p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_ct1p,SI_ftype_DI,1)
-//
-def int_hexagon_S2_ct1p :
-Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct1p">;
-//
-// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
-//
-def int_hexagon_S2_interleave :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_interleave">;
-//
-// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
-//
-def int_hexagon_S2_deinterleave :
-Hexagon_di_di_Intrinsic<"HEXAGON_S2_deinterleave">;
-
//
// BUILTIN_INFO(HEXAGON.dcfetch_A,v_ftype_DI*,1)
//
@@ -4934,6042 +160,6197 @@ def int_hexagon_S4_stored_locked :
Hexagon_Intrinsic<"HEXAGON_S4_stored_locked", [llvm_i32_ty],
[llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<0>]>;
-// V60
+def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
+ [], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>, ReadOnly<1>]>;
-class Hexagon_v2048v2048_Intrinsic_T<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_hi_W
-// tag : V6_lo_W
-class Hexagon_v512v1024_Intrinsic_T<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// tag : V6_hi_W_128B
-// tag : V6_lo_W_128B
-class Hexagon_v1024v2048_Intrinsic_T<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v64i32_ty],
- [IntrNoMem]>;
+def int_hexagon_vmemset : Hexagon_Intrinsic<"hexagon_vmemset",
+ [], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
-class Hexagon_v1024v1024_Intrinsic_T<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
-
-// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
-// tag : V6_hi
-def int_hexagon_V6_hi :
-Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_hi">;
+multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
+ def NAME#_pci : Hexagon_NonGCC_Intrinsic<
+ [ElTy, llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
+ [IntrArgMemOnly, NoCapture<3>]>;
+ def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
+ [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
+ [IntrArgMemOnly, NoCapture<2>]>;
+}
-// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
-// tag : V6_lo
-def int_hexagon_V6_lo :
-Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_lo">;
+defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrb : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadruh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadri : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_L2_loadrd : Hexagon_custom_circ_ld_Intrinsic<llvm_i64_ty>;
-// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
-// tag : V6_hi_128B
-def int_hexagon_V6_hi_128B :
-Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_hi_128B">;
+multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
+ def NAME#_pci : Hexagon_NonGCC_Intrinsic<
+ [llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
+ [IntrArgMemOnly, NoCapture<4>]>;
+ def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
+ [IntrArgMemOnly, NoCapture<3>]>;
+}
-// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
-// tag : V6_lo_128B
-def int_hexagon_V6_lo_128B :
-Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_lo_128B">;
+defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerh : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
+defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;
-// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
-// tag : V6_vassignp
-def int_hexagon_V6_vassignp :
-Hexagon_v1024v1024_Intrinsic_T<"HEXAGON_V6_vassignp">;
+// The front-end emits the intrinsic call with only two arguments. The third
+// argument from the builtin is already used by front-end to write to memory
+// by generating a store.
+class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
+ : Hexagon_NonGCC_Intrinsic<
+ [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadMem]>;
-// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
-// tag : V6_vassignp_128B
-def int_hexagon_V6_vassignp_128B :
-Hexagon_v2048v2048_Intrinsic_T<"HEXAGON_V6_vassignp_128B">;
+def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
+def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
+def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
+def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
+def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
+def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
+def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
//
-// Hexagon_iii_Intrinsic<string GCCIntSuffix>
-// tag : S6_rol_i_r
-class Hexagon_iii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
-
+// Masked vector stores
//
-// Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
-// tag : S6_rol_i_p
-class Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
- [IntrNoMem]>;
//
-// Hexagon_iiii_Intrinsic<string GCCIntSuffix>
-// tag : S6_rol_i_r_acc
-class Hexagon_iiii_Intrinsic<string GCCIntSuffix>
+// Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
+// tag: V6_vS32b_qpred_ai
+class Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_v512i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
+ [IntrArgMemOnly]>;
//
-// Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
-// tag : S6_rol_i_p_acc
-class Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
+// Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
+// tag: V6_vS32b_qpred_ai_128B
+class Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_v1024i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_valignb
-class Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_qpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai">;
-//
-// Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_valignb_128B
-class Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nqpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai">;
-//
-// Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vror
-class Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nt_qpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai">;
-//
-// Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vror_128B
-class Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nt_nqpred_ai :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai">;
-//
-// Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vunpackub
-class Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_qpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B">;
-//
-// Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vunpackub_128B
-class Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nqpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B">;
-//
-// Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vunpackob
-class Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B">;
-//
-// Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vunpackob_128B
-class Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B">;
-//
-// Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vpackeb
-class Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstoreq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstoreq">;
-//
-// Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vpackeb_128B
-class Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorenq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorenq">;
-//
-// Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpybus_dv_128B
-class Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorentq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentq">;
-//
-// Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpybus_dv_acc_128B
-class Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorentnq :
+Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentnq">;
-//
-// Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhvsat_acc
-class Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstoreq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstoreq_128B">;
-//
-// Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhvsat_acc_128B
-class Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorenq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorenq_128B">;
-//
-// Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhisat
-class Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorentq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentq_128B">;
-//
-// Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhisat_128B
-class Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaskedstorentnq_128B :
+Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentnq_128B">;
-//
-// Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhisat_acc
-class Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemiiv512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+ llvm_v16i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vdmpyhisat_acc_128B
-class Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemiiv1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+ llvm_v32i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyubi
-class Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemiiv2048_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
+ llvm_v64i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyubi_128B
-class Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemv64iiiv512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v16i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyubi_acc
-class Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemv128iiiv1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v32i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyubi_acc_128B
-class Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemv64iiiv1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v32i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddb_dv_128B
-class Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vvmemv128iiiv2048_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
- [IntrNoMem]>;
+ [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v64i32_ty],
+ [IntrArgMemOnly]>;
-//
-// Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddubh
-class Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
+def int_hexagon_V6_vgathermw :
+Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermw">;
+
+def int_hexagon_V6_vgathermw_128B :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermw_128B">;
+
+def int_hexagon_V6_vgathermh :
+Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermh">;
+
+def int_hexagon_V6_vgathermh_128B :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermh_128B">;
+
+def int_hexagon_V6_vgathermhw :
+Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermhw">;
+
+def int_hexagon_V6_vgathermhw_128B :
+Hexagon_V65_vvmemiiv2048_Intrinsic<"HEXAGON_V6_vgathermhw_128B">;
+
+def int_hexagon_V6_vgathermwq :
+Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermwq">;
+
+def int_hexagon_V6_vgathermwq_128B :
+Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermwq_128B">;
+
+def int_hexagon_V6_vgathermhq :
+Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermhq">;
+
+def int_hexagon_V6_vgathermhq_128B :
+Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhq_128B">;
+
+def int_hexagon_V6_vgathermhwq :
+Hexagon_V65_vvmemv64iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhwq">;
+
+def int_hexagon_V6_vgathermhwq_128B :
+Hexagon_V65_vvmemv128iiiv2048_Intrinsic<"HEXAGON_V6_vgathermhwq_128B">;
+
+class Hexagon_V65_viiv512v512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+ [], [llvm_i32_ty,llvm_i32_ty,
+ llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddubh_128B
-class Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_viiv1024v1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+ [], [llvm_i32_ty,llvm_i32_ty,
+ llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vd0
-class Hexagon_v512_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vv64iiiv512v512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [],
- [IntrNoMem]>;
+ [], [llvm_v512i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v16i32_ty,
+ llvm_v16i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vd0_128B
-class Hexagon_v1024_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vv128iiiv1024v1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [],
- [IntrNoMem]>;
+ [], [llvm_v1024i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v32i32_ty,
+ llvm_v32i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddbq
-class Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_viiv1024v512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+ [], [llvm_i32_ty,llvm_i32_ty,
+ llvm_v32i32_ty,llvm_v16i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddbq_128B
-class Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_viiv2048v1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+ [], [llvm_i32_ty,llvm_i32_ty,
+ llvm_v64i32_ty,llvm_v32i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vabsh
-class Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vv64iiiv1024v512_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty],
- [IntrNoMem]>;
+ [], [llvm_v512i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v32i32_ty,
+ llvm_v16i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vabsh_128B
-class Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_vv128iiiv2048v1024_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
+ [], [llvm_v1024i1_ty,llvm_i32_ty,
+ llvm_i32_ty,llvm_v64i32_ty,
+ llvm_v32i32_ty],
+ [IntrWriteMem]>;
-//
-// Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpybv_acc
-class Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+class Hexagon_V65_v2048_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [llvm_v64i32_ty], [],
[IntrNoMem]>;
//
-// Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpybv_acc_128B
-class Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermw,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw
+def int_hexagon_V6_vscattermw :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw">;
//
-// Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyub
-class Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_128B
+def int_hexagon_V6_vscattermw_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_128B">;
//
-// Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyub_128B
-class Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermh,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh
+def int_hexagon_V6_vscattermh :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh">;
//
-// Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyub_acc
-class Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_128B
+def int_hexagon_V6_vscattermh_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_128B">;
//
-// Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyub_acc_128B
-class Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_add,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_add
+def int_hexagon_V6_vscattermw_add :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw_add">;
//
-// Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandqrt
-class Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermw_add_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermw_add_128B
+def int_hexagon_V6_vscattermw_add_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_add_128B">;
//
-// Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandqrt_128B
-class Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_add,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_add
+def int_hexagon_V6_vscattermh_add :
+Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh_add">;
//
-// Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandqrt_acc
-class Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermh_add_128B,v_ftype_SISIVIVI,4)
+// tag : V6_vscattermh_add_128B
+def int_hexagon_V6_vscattermh_add_128B :
+Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_add_128B">;
//
-// Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandqrt_acc_128B
-class Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermwq,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermwq
+def int_hexagon_V6_vscattermwq :
+Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermwq">;
//
-// Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvrt
-class Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermwq_128B,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermwq_128B
+def int_hexagon_V6_vscattermwq_128B :
+Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermwq_128B">;
//
-// Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvrt_128B
-class Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhq,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermhq
+def int_hexagon_V6_vscattermhq :
+Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermhq">;
//
-// Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvrt_acc
-class Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhq_128B,v_ftype_QVSISIVIVI,5)
+// tag : V6_vscattermhq_128B
+def int_hexagon_V6_vscattermhq_128B :
+Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermhq_128B">;
//
-// Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvrt_acc_128B
-class Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw
+def int_hexagon_V6_vscattermhw :
+Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw">;
//
-// Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vgtw
-class Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_128B,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_128B
+def int_hexagon_V6_vscattermhw_128B :
+Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_128B">;
//
-// Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vgtw_128B
-class Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhwq,v_ftype_QVSISIVDVI,5)
+// tag : V6_vscattermhwq
+def int_hexagon_V6_vscattermhwq :
+Hexagon_V65_vv64iiiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhwq">;
//
-// Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vgtw_and
-class Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhwq_128B,v_ftype_QVSISIVDVI,5)
+// tag : V6_vscattermhwq_128B
+def int_hexagon_V6_vscattermhwq_128B :
+Hexagon_V65_vv128iiiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhwq_128B">;
//
-// Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vgtw_and_128B
-class Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_add
+def int_hexagon_V6_vscattermhw_add :
+Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw_add">;
//
-// Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add_128B,v_ftype_SISIVDVI,4)
+// tag : V6_vscattermhw_add_128B
+def int_hexagon_V6_vscattermhw_add_128B :
+Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B">;
+
+// Auto-generated intrinsics
+
+// tag : S2_vsatwh
+class Hexagon_i32_i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybusv
+class Hexagon_v16i32_v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybusv
+class Hexagon_v32i32_v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vaslw_acc
+class Hexagon_v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vaslw_acc
+class Hexagon_v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmux
+class Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmux
+class Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : S2_tableidxd_goodsyntax
+class Hexagon_i32_i32i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandnqrt_acc
+class Hexagon_v16i32_v16i32v512i1i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandnqrt_acc
+class Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybusi
+class Hexagon_v32i32_v32i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybusi
+class Hexagon_v64i32_v64i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vsubb_dv
+class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+ [IntrNoMem]>;
+
+// tag : M2_mpysu_up
+class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : M2_mpyud_acc_ll_s0
+class Hexagon_i64_i64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : S2_lsr_i_r_nac
+class Hexagon_i32_i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : M2_cmpysc_s0
+class Hexagon_i64_i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_lo
+class Hexagon_v16i32_v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_lo
+class Hexagon_v32i32_v64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty],
+ [IntrNoMem]>;
+
+// tag : S2_shuffoh
+class Hexagon_i64_i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : F2_sfmax
+class Hexagon_float_floatfloat_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : A2_vabswsat
+class Hexagon_i64_i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag :
+class Hexagon_v32i32_v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_ldnp0
+class Hexagon_v16i32_i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_ldnp0
+class Hexagon_v32i32_i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vdmpyhb
+class Hexagon_v16i32_v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vdmpyhb
+class Hexagon_v32i32_v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : A4_vcmphgti
+class Hexagon_i32_i64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag :
+class Hexagon_v32i32_v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : S6_rol_i_p_or
+class Hexagon_i64_i64i64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vgtuh_and
+class Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vgtuh_and
+class Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : A2_abssat
+class Hexagon_i32_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : A2_vcmpwgtu
+class Hexagon_i32_i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vtmpybus_acc
+class Hexagon_v64i32_v64i32v64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_conv_df2uw_chop
+class Hexagon_i32_double_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+
// tag : V6_pred_or
-class Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
+class Hexagon_v512i1_v512i1v512i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_or_128B
-class Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
+// tag : V6_pred_or
+class Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_not
-class Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty],
- [IntrNoMem]>;
+// tag : S2_asr_i_p_rnd_goodsyntax
+class Hexagon_i64_i64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_not_128B
-class Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty],
- [IntrNoMem]>;
+// tag : F2_conv_w2df
+class Hexagon_double_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_scalar2
-class Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vunpackuh
+class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_scalar2_128B
-class Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vunpackuh
+class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vswap
-class Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+// tag : V6_vadduhw_acc
+class Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vswap_128B
-class Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+// tag : V6_vadduhw_acc
+class Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vshuffvdd
-class Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// tag : M2_vdmacs_s0
+class Hexagon_i64_i64i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vshuffvdd_128B
-class Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vrmpybub_rtt_acc
+class Hexagon_v32i32_v32i32v16i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybub_rtt_acc
+class Hexagon_v64i32_v64i32v32i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+// tag : V6_ldu0
+class Hexagon_v16i32_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_ldu0
+class Hexagon_v32i32_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : S4_extract_rp
+class Hexagon_i32_i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vdmpyhsuisat
+class Hexagon_v16i32_v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vdmpyhsuisat
+class Hexagon_v32i32_v64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : A2_addsp
+class Hexagon_i64_i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
// tag : V6_extractw
-class Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+class Hexagon_i32_v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_extractw_128B
-class Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_extractw
+class Hexagon_i32_v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_lvsplatw
-class Hexagon_v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vlutvwhi
+class Hexagon_v32i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_lvsplatw_128B
-class Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vlutvwhi
+class Hexagon_v64i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vgtuh
+class Hexagon_v512i1_v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vgtuh
+class Hexagon_v1024i1_v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_sffma_lib
+class Hexagon_float_floatfloatfloat_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : F2_conv_ud2df
+class Hexagon_double_i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : S2_vzxthw
+class Hexagon_i64_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vtmpyhb
+class Hexagon_v64i32_v64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vshufoeh
+class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vshufoeh
+class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vlut4
+class Hexagon_v16i32_v16i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vlut4
+class Hexagon_v32i32_v32i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag :
+class Hexagon_v16i32_v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_conv_uw2sf
+class Hexagon_float_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vswap
+class Hexagon_v32i32_v512i1v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vswap
+class Hexagon_v64i32_v1024i1v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandnqrt
+class Hexagon_v16i32_v512i1i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandnqrt
+class Hexagon_v32i32_v1024i1i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmpyub
+class Hexagon_v64i32_v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : A5_ACS
+class Hexagon_i64i32_i64i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vunpackob
+class Hexagon_v32i32_v32i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vunpackob
+class Hexagon_v64i32_v64i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmpyhsat_acc
+class Hexagon_v32i32_v32i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmpyhsat_acc
+class Hexagon_v64i32_v64i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vaddcarrysat
+class Hexagon_v16i32_v16i32v16i32v512i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vaddcarrysat
+class Hexagon_v32i32_v32i32v32i32v1024i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
// tag : V6_vlutvvb_oracc
-class Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+class Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvvb_oracc_128B
-class Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vlutvvb_oracc
+class Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybub_rtt
+class Hexagon_v32i32_v16i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrmpybub_rtt
+class Hexagon_v64i32_v32i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : A4_addp_c
+class Hexagon_i64i32_i64i64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrsadubi_acc
+class Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vrsadubi_acc
+class Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_conv_df2sf
+class Hexagon_float_double_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandvqv
+class Hexagon_v16i32_v512i1v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandvqv
+class Hexagon_v32i32_v1024i1v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : C2_vmux
+class Hexagon_i64_i32i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : F2_sfcmpeq
+class Hexagon_i32_floatfloat_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : V6_vmpahhsat
+class Hexagon_v16i32_v16i32v16i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmpahhsat
+class Hexagon_v32i32_v32i32v32i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandvrt
+class Hexagon_v512i1_v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandvrt
+class Hexagon_v1024i1_v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vsubcarry
+class Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v16i32_ty,llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vsubcarry
+class Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B
+ : Hexagon_NonGCC_Intrinsic<
+ [llvm_v32i32_ty,llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
+ [IntrNoMem]>;
+
+// tag : F2_sffixupr
+class Hexagon_float_float_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : V6_vandvrt_acc
+class Hexagon_v512i1_v512i1v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vandvrt_acc
+class Hexagon_v1024i1_v1024i1v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_dfsub
+class Hexagon_double_doubledouble_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty,llvm_double_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : V6_vmpyowh_sacc
+class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vmpyowh_sacc
+class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : S2_insertp
+class Hexagon_i64_i64i64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : F2_sfinvsqrta
+class Hexagon_floati32_float_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty,llvm_i32_ty], [llvm_float_ty],
+ [IntrNoMem, Throws]>;
+
+// tag : V6_vtran2x2_map
+class Hexagon_v16i32v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty,llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_vtran2x2_map
+class Hexagon_v32i32v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty,llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
// tag : V6_vlutvwh_oracc
-class Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+class Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvwh_oracc_128B
-class Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+// tag : V6_vlutvwh_oracc
+class Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
-// tag: V6_vS32b_qpred_ai
-class Hexagon_vv64ivmemv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
- [IntrArgMemOnly]>;
+// tag : F2_dfcmpge
+class Hexagon_i32_doubledouble_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty,llvm_double_ty],
+ [IntrNoMem, Throws]>;
-//
-// Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
-// tag: V6_vS32b_qpred_ai_128B
-class Hexagon_vv128ivmemv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
+// tag : F2_conv_df2d_chop
+class Hexagon_i64_double_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_double_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r,SI_ftype_SISI,2)
-// tag : S6_rol_i_r
-def int_hexagon_S6_rol_i_r :
-Hexagon_iii_Intrinsic<"HEXAGON_S6_rol_i_r">;
+// tag : F2_conv_sf2w
+class Hexagon_i32_float_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p,DI_ftype_DISI,2)
-// tag : S6_rol_i_p
-def int_hexagon_S6_rol_i_p :
-Hexagon_LLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p">;
+// tag : F2_sfclass
+class Hexagon_i32_floati32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty,llvm_i32_ty],
+ [IntrNoMem, Throws]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r_acc,SI_ftype_SISISI,3)
-// tag : S6_rol_i_r_acc
-def int_hexagon_S6_rol_i_r_acc :
-Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_acc">;
+// tag : F2_conv_sf2ud_chop
+class Hexagon_i64_float_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_float_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p_acc,DI_ftype_DIDISI,3)
-// tag : S6_rol_i_p_acc
-def int_hexagon_S6_rol_i_p_acc :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_acc">;
+// tag : V6_pred_scalar2v2
+class Hexagon_v512i1_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r_nac,SI_ftype_SISISI,3)
-// tag : S6_rol_i_r_nac
-def int_hexagon_S6_rol_i_r_nac :
-Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_nac">;
+// tag : V6_pred_scalar2v2
+class Hexagon_v1024i1_i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p_nac,DI_ftype_DIDISI,3)
-// tag : S6_rol_i_p_nac
-def int_hexagon_S6_rol_i_p_nac :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_nac">;
+// tag : F2_sfrecipa
+class Hexagon_floati32_floatfloat_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty,llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
+ [IntrNoMem, Throws]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r_xacc,SI_ftype_SISISI,3)
-// tag : S6_rol_i_r_xacc
-def int_hexagon_S6_rol_i_r_xacc :
-Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_xacc">;
+// tag : V6_vprefixqh
+class Hexagon_v16i32_v512i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p_xacc,DI_ftype_DIDISI,3)
-// tag : S6_rol_i_p_xacc
-def int_hexagon_S6_rol_i_p_xacc :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_xacc">;
+// tag : V6_vprefixqh
+class Hexagon_v32i32_v1024i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r_and,SI_ftype_SISISI,3)
-// tag : S6_rol_i_r_and
-def int_hexagon_S6_rol_i_r_and :
-Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_and">;
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v16i32_v16i32v32i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_r_or,SI_ftype_SISISI,3)
-// tag : S6_rol_i_r_or
-def int_hexagon_S6_rol_i_r_or :
-Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_or">;
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v32i32_v32i32v64i32i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p_and,DI_ftype_DIDISI,3)
-// tag : S6_rol_i_p_and
-def int_hexagon_S6_rol_i_p_and :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_and">;
+// tag : F2_conv_ud2sf
+class Hexagon_float_i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S6_rol_i_p_or,DI_ftype_DIDISI,3)
-// tag : S6_rol_i_p_or
-def int_hexagon_S6_rol_i_p_or :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_or">;
+// tag : F2_conv_sf2df
+class Hexagon_double_float_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_float_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.S2_cabacencbin,DI_ftype_DIDIQI,3)
-// tag : S2_cabacencbin
-def int_hexagon_S2_cabacencbin :
-Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S2_cabacencbin">;
+// tag : F2_sffma_sc
+class Hexagon_float_floatfloatfloati32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty,llvm_i32_ty],
+ [IntrNoMem, Throws]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_valignb,VI_ftype_VIVISI,3)
-// tag : V6_valignb
-def int_hexagon_V6_valignb :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignb">;
+// tag : F2_dfclass
+class Hexagon_i32_doublei32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty,llvm_i32_ty],
+ [IntrNoMem, Throws]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_valignb_128B,VI_ftype_VIVISI,3)
-// tag : V6_valignb_128B
-def int_hexagon_V6_valignb_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignb_128B">;
+// tag : V6_vd0
+class Hexagon_v16i32__Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlalignb,VI_ftype_VIVISI,3)
-// tag : V6_vlalignb
-def int_hexagon_V6_vlalignb :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignb">;
+// tag : V6_vd0
+class Hexagon_v32i32__Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlalignb_128B,VI_ftype_VIVISI,3)
-// tag : V6_vlalignb_128B
-def int_hexagon_V6_vlalignb_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
+// tag : V6_vdd0
+class Hexagon_v64i32__Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_valignbi,VI_ftype_VIVISI,3)
-// tag : V6_valignbi
-def int_hexagon_V6_valignbi :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignbi">;
+// tag : S2_insert_rp
+class Hexagon_i32_i32i32i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i64_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_valignbi_128B,VI_ftype_VIVISI,3)
-// tag : V6_valignbi_128B
-def int_hexagon_V6_valignbi_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignbi_128B">;
+// tag : V6_vassignp
+class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlalignbi,VI_ftype_VIVISI,3)
-// tag : V6_vlalignbi
-def int_hexagon_V6_vlalignbi :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignbi">;
+// tag : A6_vminub_RdP
+class Hexagon_i64i32_i64i64_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty,llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlalignbi_128B,VI_ftype_VIVISI,3)
-// tag : V6_vlalignbi_128B
-def int_hexagon_V6_vlalignbi_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignbi_128B">;
+// tag : V6_pred_not
+class Hexagon_v512i1_v512i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vror,VI_ftype_VISI,2)
-// tag : V6_vror
-def int_hexagon_V6_vror :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vror">;
+// tag : V6_pred_not
+class Hexagon_v1024i1_v1024i1_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty],
+ [IntrNoMem]>;
-//
-// BUILTIN_INFO(HEXAGON.V6_vror_128B,VI_ftype_VISI,2)
-// tag : V6_vror_128B
-def int_hexagon_V6_vror_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vror_128B">;
+// V5 Scalar Instructions.
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackub,VD_ftype_VI,1)
-// tag : V6_vunpackub
-def int_hexagon_V6_vunpackub :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackub">;
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackub_128B,VD_ftype_VI,1)
-// tag : V6_vunpackub_128B
-def int_hexagon_V6_vunpackub_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
+def int_hexagon_S2_vsatwh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackb,VD_ftype_VI,1)
-// tag : V6_vunpackb
-def int_hexagon_V6_vunpackb :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackb">;
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackb_128B,VD_ftype_VI,1)
-// tag : V6_vunpackb_128B
-def int_hexagon_V6_vunpackb_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
+def int_hexagon_M2_mpysu_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysu_up">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackuh,VD_ftype_VI,1)
-// tag : V6_vunpackuh
-def int_hexagon_V6_vunpackuh :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackuh">;
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackuh_128B,VD_ftype_VI,1)
-// tag : V6_vunpackuh_128B
-def int_hexagon_V6_vunpackuh_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackh,VD_ftype_VI,1)
-// tag : V6_vunpackh
-def int_hexagon_V6_vunpackh :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackh">;
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackh_128B,VD_ftype_VI,1)
-// tag : V6_vunpackh_128B
-def int_hexagon_V6_vunpackh_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackob,VD_ftype_VDVI,2)
-// tag : V6_vunpackob
-def int_hexagon_V6_vunpackob :
-Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackob">;
+def int_hexagon_M4_cmpyi_whc :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackob_128B,VD_ftype_VDVI,2)
-// tag : V6_vunpackob_128B
-def int_hexagon_V6_vunpackob_128B :
-Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackoh,VD_ftype_VDVI,2)
-// tag : V6_vunpackoh
-def int_hexagon_V6_vunpackoh :
-Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackoh">;
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vunpackoh_128B,VD_ftype_VDVI,2)
-// tag : V6_vunpackoh_128B
-def int_hexagon_V6_vunpackoh_128B :
-Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackeb,VI_ftype_VIVI,2)
-// tag : V6_vpackeb
-def int_hexagon_V6_vpackeb :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeb">;
+def int_hexagon_S2_shuffoh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffoh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackeb_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackeb_128B
-def int_hexagon_V6_vpackeb_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
+def int_hexagon_F2_sfmax :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackeh,VI_ftype_VIVI,2)
-// tag : V6_vpackeh
-def int_hexagon_V6_vpackeh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeh">;
+def int_hexagon_A2_vabswsat :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabswsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackeh_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackeh_128B
-def int_hexagon_V6_vpackeh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
+def int_hexagon_S2_asr_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackob,VI_ftype_VIVI,2)
-// tag : V6_vpackob
-def int_hexagon_V6_vpackob :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackob">;
+def int_hexagon_S2_asr_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackob_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackob_128B
-def int_hexagon_V6_vpackob_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackob_128B">;
+def int_hexagon_A4_combineri :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackoh,VI_ftype_VIVI,2)
-// tag : V6_vpackoh
-def int_hexagon_V6_vpackoh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackoh">;
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackoh_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackoh_128B
-def int_hexagon_V6_vpackoh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
+def int_hexagon_M4_vpmpyh_acc :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat,VI_ftype_VIVI,2)
-// tag : V6_vpackhub_sat
-def int_hexagon_V6_vpackhub_sat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackhub_sat_128B
-def int_hexagon_V6_vpackhub_sat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
+def int_hexagon_A2_notp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_notp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat,VI_ftype_VIVI,2)
-// tag : V6_vpackhb_sat
-def int_hexagon_V6_vpackhb_sat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackhb_sat_128B
-def int_hexagon_V6_vpackhb_sat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat,VI_ftype_VIVI,2)
-// tag : V6_vpackwuh_sat
-def int_hexagon_V6_vpackwuh_sat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
+def int_hexagon_C4_or_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackwuh_sat_128B
-def int_hexagon_V6_vpackwuh_sat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat,VI_ftype_VIVI,2)
-// tag : V6_vpackwh_sat
-def int_hexagon_V6_vpackwh_sat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vpackwh_sat_128B
-def int_hexagon_V6_vpackwh_sat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
+def int_hexagon_S2_brevp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_brevp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vzb,VD_ftype_VI,1)
-// tag : V6_vzb
-def int_hexagon_V6_vzb :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzb">;
+def int_hexagon_M4_pmpyw_acc :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vzb_128B,VD_ftype_VI,1)
-// tag : V6_vzb_128B
-def int_hexagon_V6_vzb_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzb_128B">;
+def int_hexagon_S2_cl1 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsb,VD_ftype_VI,1)
-// tag : V6_vsb
-def int_hexagon_V6_vsb :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsb">;
+def int_hexagon_C4_cmplte :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplte">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsb_128B,VD_ftype_VI,1)
-// tag : V6_vsb_128B
-def int_hexagon_V6_vsb_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsb_128B">;
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vzh,VD_ftype_VI,1)
-// tag : V6_vzh
-def int_hexagon_V6_vzh :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzh">;
+def int_hexagon_A2_vaddws :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddws">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vzh_128B,VD_ftype_VI,1)
-// tag : V6_vzh_128B
-def int_hexagon_V6_vzh_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzh_128B">;
+def int_hexagon_A2_maxup :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxup">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsh,VD_ftype_VI,1)
-// tag : V6_vsh
-def int_hexagon_V6_vsh :
-Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsh">;
+def int_hexagon_A4_vcmphgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgti">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsh_128B,VD_ftype_VI,1)
-// tag : V6_vsh_128B
-def int_hexagon_V6_vsh_128B :
-Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsh_128B">;
+def int_hexagon_S2_interleave :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_interleave">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus,VI_ftype_VISI,2)
-// tag : V6_vdmpybus
-def int_hexagon_V6_vdmpybus :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus">;
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_128B,VI_ftype_VISI,2)
-// tag : V6_vdmpybus_128B
-def int_hexagon_V6_vdmpybus_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
+def int_hexagon_A2_abssat :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abssat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc,VI_ftype_VIVISI,3)
-// tag : V6_vdmpybus_acc
-def int_hexagon_V6_vdmpybus_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
+def int_hexagon_A2_vcmpwgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vdmpybus_acc_128B
-def int_hexagon_V6_vdmpybus_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
+def int_hexagon_C2_cmpgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv,VD_ftype_VDSI,2)
-// tag : V6_vdmpybus_dv
-def int_hexagon_V6_vdmpybus_dv :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
+def int_hexagon_C2_cmpgtp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_128B,VD_ftype_VDSI,2)
-// tag : V6_vdmpybus_dv_128B
-def int_hexagon_V6_vdmpybus_dv_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
+def int_hexagon_A4_cmphgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vdmpybus_dv_acc
-def int_hexagon_V6_vdmpybus_dv_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
+def int_hexagon_C2_cmpgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgti">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vdmpybus_dv_acc_128B
-def int_hexagon_V6_vdmpybus_dv_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
+def int_hexagon_M2_mpyi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb,VI_ftype_VISI,2)
-// tag : V6_vdmpyhb
-def int_hexagon_V6_vdmpyhb :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb">;
+def int_hexagon_F2_conv_df2uw_chop :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_128B,VI_ftype_VISI,2)
-// tag : V6_vdmpyhb_128B
-def int_hexagon_V6_vdmpyhb_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
+def int_hexagon_A4_cmpheq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhb_acc
-def int_hexagon_V6_vdmpyhb_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhb_acc_128B
-def int_hexagon_V6_vdmpyhb_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv,VD_ftype_VDSI,2)
-// tag : V6_vdmpyhb_dv
-def int_hexagon_V6_vdmpyhb_dv :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_128B,VD_ftype_VDSI,2)
-// tag : V6_vdmpyhb_dv_128B
-def int_hexagon_V6_vdmpyhb_dv_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
+def int_hexagon_S2_vrcnegh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vrcnegh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vdmpyhb_dv_acc
-def int_hexagon_V6_vdmpyhb_dv_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
+def int_hexagon_S2_extractup :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S2_extractup">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vdmpyhb_dv_acc_128B
-def int_hexagon_V6_vdmpyhb_dv_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
+def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat,VI_ftype_VIVI,2)
-// tag : V6_vdmpyhvsat
-def int_hexagon_V6_vdmpyhvsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
+def int_hexagon_S4_ntstbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vdmpyhvsat_128B
-def int_hexagon_V6_vdmpyhvsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
+def int_hexagon_F2_conv_w2sf :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_w2sf">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vdmpyhvsat_acc
-def int_hexagon_V6_vdmpyhvsat_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
+def int_hexagon_C2_not :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_not">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vdmpyhvsat_acc_128B
-def int_hexagon_V6_vdmpyhvsat_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
+def int_hexagon_C2_tfrpr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrpr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat,VI_ftype_VISI,2)
-// tag : V6_vdmpyhsat
-def int_hexagon_V6_vdmpyhsat :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_128B,VI_ftype_VISI,2)
-// tag : V6_vdmpyhsat_128B
-def int_hexagon_V6_vdmpyhsat_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhsat_acc
-def int_hexagon_V6_vdmpyhsat_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
+def int_hexagon_A4_cmpbgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhsat_acc_128B
-def int_hexagon_V6_vdmpyhsat_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat,VI_ftype_VDSI,2)
-// tag : V6_vdmpyhisat
-def int_hexagon_V6_vdmpyhisat :
-Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
+def int_hexagon_A4_rcmpneqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_128B,VI_ftype_VDSI,2)
-// tag : V6_vdmpyhisat_128B
-def int_hexagon_V6_vdmpyhisat_128B :
-Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc,VI_ftype_VIVDSI,3)
-// tag : V6_vdmpyhisat_acc
-def int_hexagon_V6_vdmpyhisat_acc :
-Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
+def int_hexagon_M2_subacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_subacc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc_128B,VI_ftype_VIVDSI,3)
-// tag : V6_vdmpyhisat_acc_128B
-def int_hexagon_V6_vdmpyhisat_acc_128B :
-Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
+def int_hexagon_A2_orp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_orp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat,VI_ftype_VISI,2)
-// tag : V6_vdmpyhsusat
-def int_hexagon_V6_vdmpyhsusat :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
+def int_hexagon_M2_mpyu_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_up">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_128B,VI_ftype_VISI,2)
-// tag : V6_vdmpyhsusat_128B
-def int_hexagon_V6_vdmpyhsusat_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhsusat_acc
-def int_hexagon_V6_vdmpyhsusat_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
+def int_hexagon_S2_asr_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vdmpyhsusat_acc_128B
-def int_hexagon_V6_vdmpyhsusat_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
+def int_hexagon_S2_asr_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat,VI_ftype_VDSI,2)
-// tag : V6_vdmpyhsuisat
-def int_hexagon_V6_vdmpyhsuisat :
-Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
+def int_hexagon_A4_cmpbgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_128B,VI_ftype_VDSI,2)
-// tag : V6_vdmpyhsuisat_128B
-def int_hexagon_V6_vdmpyhsuisat_128B :
-Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
+def int_hexagon_A4_vcmpbeq_any :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc,VI_ftype_VIVDSI,3)
-// tag : V6_vdmpyhsuisat_acc
-def int_hexagon_V6_vdmpyhsuisat_acc :
-Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
+def int_hexagon_A4_cmpbgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgti">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc_128B,VI_ftype_VIVDSI,3)
-// tag : V6_vdmpyhsuisat_acc_128B
-def int_hexagon_V6_vdmpyhsuisat_acc_128B :
-Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyb,VD_ftype_VDSI,2)
-// tag : V6_vtmpyb
-def int_hexagon_V6_vtmpyb :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb">;
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyb_128B,VD_ftype_VDSI,2)
-// tag : V6_vtmpyb_128B
-def int_hexagon_V6_vtmpyb_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpyb_acc
-def int_hexagon_V6_vtmpyb_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
+def int_hexagon_A2_addsp :
+Hexagon_i64_i32i64_Intrinsic<"HEXAGON_A2_addsp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpyb_acc_128B
-def int_hexagon_V6_vtmpyb_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
+def int_hexagon_S4_vxsubaddw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpybus,VD_ftype_VDSI,2)
-// tag : V6_vtmpybus
-def int_hexagon_V6_vtmpybus :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus">;
+def int_hexagon_A4_vcmpheqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpheqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpybus_128B,VD_ftype_VDSI,2)
-// tag : V6_vtmpybus_128B
-def int_hexagon_V6_vtmpybus_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
+def int_hexagon_S4_vxsubaddh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpybus_acc
-def int_hexagon_V6_vtmpybus_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
+def int_hexagon_M4_pmpyw :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_pmpyw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpybus_acc_128B
-def int_hexagon_V6_vtmpybus_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
+def int_hexagon_S2_vsathb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyhb,VD_ftype_VDSI,2)
-// tag : V6_vtmpyhb
-def int_hexagon_V6_vtmpyhb :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb">;
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_128B,VD_ftype_VDSI,2)
-// tag : V6_vtmpyhb_128B
-def int_hexagon_V6_vtmpyhb_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpyhb_acc
-def int_hexagon_V6_vtmpyhb_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vtmpyhb_acc_128B
-def int_hexagon_V6_vtmpyhb_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub,VI_ftype_VISI,2)
-// tag : V6_vrmpyub
-def int_hexagon_V6_vrmpyub :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub">;
+def int_hexagon_A2_pxorf :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_A2_pxorf">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_128B,VI_ftype_VISI,2)
-// tag : V6_vrmpyub_128B
-def int_hexagon_V6_vrmpyub_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
+def int_hexagon_C2_cmpgei :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgei">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc,VI_ftype_VIVISI,3)
-// tag : V6_vrmpyub_acc
-def int_hexagon_V6_vrmpyub_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
+def int_hexagon_A2_vsubub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vrmpyub_acc_128B
-def int_hexagon_V6_vrmpyub_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
+def int_hexagon_S2_asl_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubv,VI_ftype_VIVI,2)
-// tag : V6_vrmpyubv
-def int_hexagon_V6_vrmpyubv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv">;
+def int_hexagon_S2_asl_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_128B,VI_ftype_VIVI,2)
-// tag : V6_vrmpyubv_128B
-def int_hexagon_V6_vrmpyubv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
+def int_hexagon_A4_vrminuw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpyubv_acc
-def int_hexagon_V6_vrmpyubv_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
+def int_hexagon_F2_sffma :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpyubv_acc_128B
-def int_hexagon_V6_vrmpyubv_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
+def int_hexagon_A2_absp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_absp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybv,VI_ftype_VIVI,2)
-// tag : V6_vrmpybv
-def int_hexagon_V6_vrmpybv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv">;
+def int_hexagon_C2_all8 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_all8">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybv_128B,VI_ftype_VIVI,2)
-// tag : V6_vrmpybv_128B
-def int_hexagon_V6_vrmpybv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
+def int_hexagon_A4_vrminuh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpybv_acc
-def int_hexagon_V6_vrmpybv_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
+def int_hexagon_F2_sffma_lib :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma_lib">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpybv_acc_128B
-def int_hexagon_V6_vrmpybv_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
+def int_hexagon_M4_vrmpyoh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubi,VD_ftype_VDSISI,3)
-// tag : V6_vrmpyubi
-def int_hexagon_V6_vrmpyubi :
-Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi">;
+def int_hexagon_M4_vrmpyoh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_128B,VD_ftype_VDSISI,3)
-// tag : V6_vrmpyubi_128B
-def int_hexagon_V6_vrmpyubi_128B :
-Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_128B">;
+def int_hexagon_C2_bitsset :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsset">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc,VD_ftype_VDVDSISI,4)
-// tag : V6_vrmpyubi_acc
-def int_hexagon_V6_vrmpyubi_acc :
-Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc">;
+def int_hexagon_M2_mpysip :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysip">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc_128B,VD_ftype_VDVDSISI,4)
-// tag : V6_vrmpyubi_acc_128B
-def int_hexagon_V6_vrmpyubi_acc_128B :
-Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B">;
+def int_hexagon_M2_mpysin :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysin">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybus,VI_ftype_VISI,2)
-// tag : V6_vrmpybus
-def int_hexagon_V6_vrmpybus :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus">;
+def int_hexagon_A4_boundscheck :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_A4_boundscheck">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybus_128B,VI_ftype_VISI,2)
-// tag : V6_vrmpybus_128B
-def int_hexagon_V6_vrmpybus_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
+def int_hexagon_M5_vrmpybuu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybuu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc,VI_ftype_VIVISI,3)
-// tag : V6_vrmpybus_acc
-def int_hexagon_V6_vrmpybus_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
+def int_hexagon_C4_fastcorner9 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vrmpybus_acc_128B
-def int_hexagon_V6_vrmpybus_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusi,VD_ftype_VDSISI,3)
-// tag : V6_vrmpybusi
-def int_hexagon_V6_vrmpybusi :
-Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi">;
+def int_hexagon_A2_neg :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_neg">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_128B,VD_ftype_VDSISI,3)
-// tag : V6_vrmpybusi_128B
-def int_hexagon_V6_vrmpybusi_128B :
-Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_128B">;
+def int_hexagon_A2_subsat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc,VD_ftype_VDVDSISI,4)
-// tag : V6_vrmpybusi_acc
-def int_hexagon_V6_vrmpybusi_acc :
-Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc">;
+def int_hexagon_S2_asl_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc_128B,VD_ftype_VDVDSISI,4)
-// tag : V6_vrmpybusi_acc_128B
-def int_hexagon_V6_vrmpybusi_acc_128B :
-Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B">;
+def int_hexagon_S2_asl_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusv,VI_ftype_VIVI,2)
-// tag : V6_vrmpybusv
-def int_hexagon_V6_vrmpybusv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv">;
+def int_hexagon_A2_vnavgh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_128B,VI_ftype_VIVI,2)
-// tag : V6_vrmpybusv_128B
-def int_hexagon_V6_vrmpybusv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpybusv_acc
-def int_hexagon_V6_vrmpybusv_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
+def int_hexagon_F2_conv_ud2df :
+Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_ud2df">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vrmpybusv_acc_128B
-def int_hexagon_V6_vrmpybusv_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
+def int_hexagon_A2_vnavgw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdsaduh,VD_ftype_VDSI,2)
-// tag : V6_vdsaduh
-def int_hexagon_V6_vdsaduh :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh">;
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdsaduh_128B,VD_ftype_VDSI,2)
-// tag : V6_vdsaduh_128B
-def int_hexagon_V6_vdsaduh_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
+def int_hexagon_S4_subi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vdsaduh_acc
-def int_hexagon_V6_vdsaduh_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
+def int_hexagon_S2_vzxthw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxthw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vdsaduh_acc_128B
-def int_hexagon_V6_vdsaduh_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
+def int_hexagon_F2_sfadd :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfadd">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrsadubi,VD_ftype_VDSISI,3)
-// tag : V6_vrsadubi
-def int_hexagon_V6_vrsadubi :
-Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi">;
+def int_hexagon_A2_sub :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_sub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrsadubi_128B,VD_ftype_VDSISI,3)
-// tag : V6_vrsadubi_128B
-def int_hexagon_V6_vrsadubi_128B :
-Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_128B">;
+def int_hexagon_M2_vmac2su_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc,VD_ftype_VDVDSISI,4)
-// tag : V6_vrsadubi_acc
-def int_hexagon_V6_vrsadubi_acc :
-Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc">;
+def int_hexagon_M2_vmac2su_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc_128B,VD_ftype_VDVDSISI,4)
-// tag : V6_vrsadubi_acc_128B
-def int_hexagon_V6_vrsadubi_acc_128B :
-Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B">;
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrw,VI_ftype_VISI,2)
-// tag : V6_vasrw
-def int_hexagon_V6_vasrw :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrw">;
+def int_hexagon_S2_insert :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_insert">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrw_128B,VI_ftype_VISI,2)
-// tag : V6_vasrw_128B
-def int_hexagon_V6_vasrw_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_128B">;
+def int_hexagon_S2_packhl :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_S2_packhl">;
+def int_hexagon_A4_vcmpwgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgti">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslw,VI_ftype_VISI,2)
-// tag : V6_vaslw
-def int_hexagon_V6_vaslw :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslw">;
+def int_hexagon_A2_vavguwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguwr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslw_128B,VI_ftype_VISI,2)
-// tag : V6_vaslw_128B
-def int_hexagon_V6_vaslw_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_128B">;
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrw,VI_ftype_VISI,2)
-// tag : V6_vlsrw
-def int_hexagon_V6_vlsrw :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrw">;
+def int_hexagon_A2_svsubhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubhs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrw_128B,VI_ftype_VISI,2)
-// tag : V6_vlsrw_128B
-def int_hexagon_V6_vlsrw_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwv,VI_ftype_VIVI,2)
-// tag : V6_vasrwv
-def int_hexagon_V6_vasrwv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrwv">;
+def int_hexagon_M4_and_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwv_128B,VI_ftype_VIVI,2)
-// tag : V6_vasrwv_128B
-def int_hexagon_V6_vasrwv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
+def int_hexagon_F2_conv_d2df :
+Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_d2df">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslwv,VI_ftype_VIVI,2)
-// tag : V6_vaslwv
-def int_hexagon_V6_vaslwv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslwv">;
+def int_hexagon_C2_cmpgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslwv_128B,VI_ftype_VIVI,2)
-// tag : V6_vaslwv_128B
-def int_hexagon_V6_vaslwv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
+def int_hexagon_A2_vconj :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vconj">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrwv,VI_ftype_VIVI,2)
-// tag : V6_vlsrwv
-def int_hexagon_V6_vlsrwv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrwv">;
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrwv_128B,VI_ftype_VIVI,2)
-// tag : V6_vlsrwv_128B
-def int_hexagon_V6_vlsrwv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrh,VI_ftype_VISI,2)
-// tag : V6_vasrh
-def int_hexagon_V6_vasrh :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrh">;
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrh_128B,VI_ftype_VISI,2)
-// tag : V6_vasrh_128B
-def int_hexagon_V6_vasrh_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrh_128B">;
+def int_hexagon_S4_vxsubaddhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslh,VI_ftype_VISI,2)
-// tag : V6_vaslh
-def int_hexagon_V6_vaslh :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslh">;
+def int_hexagon_S2_clbp :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_clbp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslh_128B,VI_ftype_VISI,2)
-// tag : V6_vaslh_128B
-def int_hexagon_V6_vaslh_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslh_128B">;
+def int_hexagon_S2_deinterleave :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_deinterleave">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrh,VI_ftype_VISI,2)
-// tag : V6_vlsrh
-def int_hexagon_V6_vlsrh :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrh">;
+def int_hexagon_C2_any8 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_any8">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrh_128B,VI_ftype_VISI,2)
-// tag : V6_vlsrh_128B
-def int_hexagon_V6_vlsrh_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
+def int_hexagon_S2_togglebit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhv,VI_ftype_VIVI,2)
-// tag : V6_vasrhv
-def int_hexagon_V6_vasrhv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrhv">;
+def int_hexagon_S2_togglebit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_i">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhv_128B,VI_ftype_VIVI,2)
-// tag : V6_vasrhv_128B
-def int_hexagon_V6_vasrhv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
+def int_hexagon_F2_conv_uw2sf :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslhv,VI_ftype_VIVI,2)
-// tag : V6_vaslhv
-def int_hexagon_V6_vaslhv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslhv">;
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslhv_128B,VI_ftype_VIVI,2)
-// tag : V6_vaslhv_128B
-def int_hexagon_V6_vaslhv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrhv,VI_ftype_VIVI,2)
-// tag : V6_vlsrhv
-def int_hexagon_V6_vlsrhv :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrhv">;
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrhv_128B,VI_ftype_VIVI,2)
-// tag : V6_vlsrhv_128B
-def int_hexagon_V6_vlsrhv_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwh,VI_ftype_VIVISI,3)
-// tag : V6_vasrwh
-def int_hexagon_V6_vasrwh :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwh">;
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwh_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrwh_128B
-def int_hexagon_V6_vasrwh_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwhsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrwhsat
-def int_hexagon_V6_vasrwhsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhsat">;
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwhsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrwhsat_128B
-def int_hexagon_V6_vasrwhsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
+def int_hexagon_S2_clrbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrwhrndsat
-def int_hexagon_V6_vasrwhrndsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
+def int_hexagon_C4_or_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrwhrndsat_128B
-def int_hexagon_V6_vasrwhrndsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrwuhsat
-def int_hexagon_V6_vasrwuhsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrwuhsat_128B
-def int_hexagon_V6_vasrwuhsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
+def int_hexagon_A4_vcmpwgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgtui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundwh,VI_ftype_VIVI,2)
-// tag : V6_vroundwh
-def int_hexagon_V6_vroundwh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwh">;
+def int_hexagon_M4_vrmpyoh_acc_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundwh_128B,VI_ftype_VIVI,2)
-// tag : V6_vroundwh_128B
-def int_hexagon_V6_vroundwh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
+def int_hexagon_M4_vrmpyoh_acc_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundwuh,VI_ftype_VIVI,2)
-// tag : V6_vroundwuh
-def int_hexagon_V6_vroundwuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwuh">;
+def int_hexagon_A4_vrmaxh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundwuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vroundwuh_128B
-def int_hexagon_V6_vroundwuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
+def int_hexagon_A2_vcmpbeq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbeq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhubsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrhubsat
-def int_hexagon_V6_vasrhubsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubsat">;
+def int_hexagon_A2_vcmphgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhubsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrhubsat_128B
-def int_hexagon_V6_vasrhubsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
+def int_hexagon_A2_vnavgwcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwcr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrhubrndsat
-def int_hexagon_V6_vasrhubrndsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrhubrndsat_128B
-def int_hexagon_V6_vasrhubrndsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
+def int_hexagon_A2_vavgwcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwcr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrhbrndsat
-def int_hexagon_V6_vasrhbrndsat :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrhbrndsat_128B
-def int_hexagon_V6_vasrhbrndsat_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
+def int_hexagon_A4_vrmaxw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundhb,VI_ftype_VIVI,2)
-// tag : V6_vroundhb
-def int_hexagon_V6_vroundhb :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhb">;
+def int_hexagon_A2_vnavghr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundhb_128B,VI_ftype_VIVI,2)
-// tag : V6_vroundhb_128B
-def int_hexagon_V6_vroundhb_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
+def int_hexagon_M4_cmpyi_wh :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundhub,VI_ftype_VIVI,2)
-// tag : V6_vroundhub
-def int_hexagon_V6_vroundhub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhub">;
+def int_hexagon_A2_tfrsi :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrsi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vroundhub_128B,VI_ftype_VIVI,2)
-// tag : V6_vroundhub_128B
-def int_hexagon_V6_vroundhub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslw_acc,VI_ftype_VIVISI,3)
-// tag : V6_vaslw_acc
-def int_hexagon_V6_vaslw_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vaslw_acc">;
+def int_hexagon_A2_svnavgh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svnavgh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslw_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vaslw_acc_128B
-def int_hexagon_V6_vaslw_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
+def int_hexagon_S2_lsr_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrw_acc,VI_ftype_VIVISI,3)
-// tag : V6_vasrw_acc
-def int_hexagon_V6_vasrw_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrw_acc">;
+def int_hexagon_M2_vmac2 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrw_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrw_acc_128B
-def int_hexagon_V6_vasrw_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
+def int_hexagon_A4_vcmphgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgtui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddb,VI_ftype_VIVI,2)
-// tag : V6_vaddb
-def int_hexagon_V6_vaddb :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddb">;
+def int_hexagon_A2_svavgh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavgh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddb_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddb_128B
-def int_hexagon_V6_vaddb_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_128B">;
+def int_hexagon_M4_vrmpyeh_acc_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubb,VI_ftype_VIVI,2)
-// tag : V6_vsubb
-def int_hexagon_V6_vsubb :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubb">;
+def int_hexagon_M4_vrmpyeh_acc_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubb_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubb_128B
-def int_hexagon_V6_vsubb_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_128B">;
+def int_hexagon_S2_lsr_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddb_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddb_dv
-def int_hexagon_V6_vaddb_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_dv">;
+def int_hexagon_A2_combine_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddb_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddb_dv_128B
-def int_hexagon_V6_vaddb_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
+def int_hexagon_M2_mpy_up :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubb_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubb_dv
-def int_hexagon_V6_vsubb_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_dv">;
+def int_hexagon_A2_combine_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubb_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubb_dv_128B
-def int_hexagon_V6_vsubb_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
+def int_hexagon_A2_negsat :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_negsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddh,VI_ftype_VIVI,2)
-// tag : V6_vaddh
-def int_hexagon_V6_vaddh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddh">;
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddh_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddh_128B
-def int_hexagon_V6_vaddh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_128B">;
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubh,VI_ftype_VIVI,2)
-// tag : V6_vsubh
-def int_hexagon_V6_vsubh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubh">;
+def int_hexagon_A4_bitsplit :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitsplit">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubh_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubh_128B
-def int_hexagon_V6_vsubh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_128B">;
+def int_hexagon_A2_vabshsat :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabshsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddh_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddh_dv
-def int_hexagon_V6_vaddh_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_dv">;
+def int_hexagon_M2_mpyui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddh_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddh_dv_128B
-def int_hexagon_V6_vaddh_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubh_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubh_dv
-def int_hexagon_V6_vsubh_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_dv">;
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubh_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubh_dv_128B
-def int_hexagon_V6_vsubh_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddw,VI_ftype_VIVI,2)
-// tag : V6_vaddw
-def int_hexagon_V6_vaddw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddw">;
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddw_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddw_128B
-def int_hexagon_V6_vaddw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_128B">;
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubw,VI_ftype_VIVI,2)
-// tag : V6_vsubw
-def int_hexagon_V6_vsubw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubw">;
+def int_hexagon_C2_cmplt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmplt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubw_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubw_128B
-def int_hexagon_V6_vsubw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_128B">;
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddw_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddw_dv
-def int_hexagon_V6_vaddw_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_dv">;
+def int_hexagon_M4_or_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddw_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddw_dv_128B
-def int_hexagon_V6_vaddw_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
+def int_hexagon_M4_mpyrr_addi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubw_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubw_dv
-def int_hexagon_V6_vsubw_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_dv">;
+def int_hexagon_S4_or_andi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubw_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubw_dv_128B
-def int_hexagon_V6_vsubw_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubsat,VI_ftype_VIVI,2)
-// tag : V6_vaddubsat
-def int_hexagon_V6_vaddubsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddubsat">;
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddubsat_128B
-def int_hexagon_V6_vaddubsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
+def int_hexagon_M4_mpyrr_addr :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddubsat_dv
-def int_hexagon_V6_vaddubsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddubsat_dv_128B
-def int_hexagon_V6_vaddubsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububsat,VI_ftype_VIVI,2)
-// tag : V6_vsububsat
-def int_hexagon_V6_vsububsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsububsat">;
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsububsat_128B
-def int_hexagon_V6_vsububsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsububsat_dv
-def int_hexagon_V6_vsububsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsububsat_dv_128B
-def int_hexagon_V6_vsububsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
+def int_hexagon_F2_sffixupn :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhsat,VI_ftype_VIVI,2)
-// tag : V6_vadduhsat
-def int_hexagon_V6_vadduhsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vadduhsat">;
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vadduhsat_128B
-def int_hexagon_V6_vadduhsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vadduhsat_dv
-def int_hexagon_V6_vadduhsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vadduhsat_dv_128B
-def int_hexagon_V6_vadduhsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhsat,VI_ftype_VIVI,2)
-// tag : V6_vsubuhsat
-def int_hexagon_V6_vsubuhsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuhsat">;
+def int_hexagon_A2_vadduhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vadduhs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubuhsat_128B
-def int_hexagon_V6_vsubuhsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
+def int_hexagon_A2_vsubuhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubuhs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubuhsat_dv
-def int_hexagon_V6_vsubuhsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubuhsat_dv_128B
-def int_hexagon_V6_vsubuhsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhsat,VI_ftype_VIVI,2)
-// tag : V6_vaddhsat
-def int_hexagon_V6_vaddhsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddhsat">;
+def int_hexagon_A2_xorp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_xorp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddhsat_128B
-def int_hexagon_V6_vaddhsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
+def int_hexagon_A4_tfrpcp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A4_tfrpcp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddhsat_dv
-def int_hexagon_V6_vaddhsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddhsat_dv_128B
-def int_hexagon_V6_vaddhsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhsat,VI_ftype_VIVI,2)
-// tag : V6_vsubhsat
-def int_hexagon_V6_vsubhsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubhsat">;
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubhsat_128B
-def int_hexagon_V6_vsubhsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubhsat_dv
-def int_hexagon_V6_vsubhsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
+def int_hexagon_A2_zxtb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxtb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubhsat_dv_128B
-def int_hexagon_V6_vsubhsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
+def int_hexagon_A2_zxth :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxth">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwsat,VI_ftype_VIVI,2)
-// tag : V6_vaddwsat
-def int_hexagon_V6_vaddwsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddwsat">;
+def int_hexagon_A2_vnavgwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddwsat_128B
-def int_hexagon_V6_vaddwsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
+def int_hexagon_M4_or_xor :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddwsat_dv
-def int_hexagon_V6_vaddwsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddwsat_dv_128B
-def int_hexagon_V6_vaddwsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwsat,VI_ftype_VIVI,2)
-// tag : V6_vsubwsat
-def int_hexagon_V6_vsubwsat :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubwsat">;
+def int_hexagon_M5_vmacbsu :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbsu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubwsat_128B
-def int_hexagon_V6_vsubwsat_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubwsat_dv
-def int_hexagon_V6_vsubwsat_dv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubwsat_dv_128B
-def int_hexagon_V6_vsubwsat_dv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgub,VI_ftype_VIVI,2)
-// tag : V6_vavgub
-def int_hexagon_V6_vavgub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgub">;
+def int_hexagon_F2_sffms_lib :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms_lib">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgub_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgub_128B
-def int_hexagon_V6_vavgub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgub_128B">;
+def int_hexagon_C4_cmpneqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgubrnd,VI_ftype_VIVI,2)
-// tag : V6_vavgubrnd
-def int_hexagon_V6_vavgubrnd :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgubrnd">;
+def int_hexagon_M4_and_xor :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgubrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgubrnd_128B
-def int_hexagon_V6_vavgubrnd_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
+def int_hexagon_A2_sat :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguh,VI_ftype_VIVI,2)
-// tag : V6_vavguh
-def int_hexagon_V6_vavguh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguh">;
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguh_128B,VI_ftype_VIVI,2)
-// tag : V6_vavguh_128B
-def int_hexagon_V6_vavguh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguh_128B">;
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguhrnd,VI_ftype_VIVI,2)
-// tag : V6_vavguhrnd
-def int_hexagon_V6_vavguhrnd :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguhrnd">;
+def int_hexagon_A2_addsat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguhrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavguhrnd_128B
-def int_hexagon_V6_vavguhrnd_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
+def int_hexagon_A2_svavghs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavghs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgh,VI_ftype_VIVI,2)
-// tag : V6_vavgh
-def int_hexagon_V6_vavgh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgh">;
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgh_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgh_128B
-def int_hexagon_V6_vavgh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgh_128B">;
+def int_hexagon_C2_bitsclri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavghrnd,VI_ftype_VIVI,2)
-// tag : V6_vavghrnd
-def int_hexagon_V6_vavghrnd :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavghrnd">;
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavghrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavghrnd_128B
-def int_hexagon_V6_vavghrnd_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgh,VI_ftype_VIVI,2)
-// tag : V6_vnavgh
-def int_hexagon_V6_vnavgh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgh">;
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgh_128B,VI_ftype_VIVI,2)
-// tag : V6_vnavgh_128B
-def int_hexagon_V6_vnavgh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgw,VI_ftype_VIVI,2)
-// tag : V6_vavgw
-def int_hexagon_V6_vavgw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgw">;
+def int_hexagon_M2_vradduh :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vradduh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgw_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgw_128B
-def int_hexagon_V6_vavgw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgw_128B">;
+def int_hexagon_A4_addp_c :
+Hexagon_i64i32_i64i64i32_Intrinsic<"HEXAGON_A4_addp_c">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgwrnd,VI_ftype_VIVI,2)
-// tag : V6_vavgwrnd
-def int_hexagon_V6_vavgwrnd :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgwrnd">;
+def int_hexagon_C2_xor :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgwrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgwrnd_128B
-def int_hexagon_V6_vavgwrnd_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgw,VI_ftype_VIVI,2)
-// tag : V6_vnavgw
-def int_hexagon_V6_vnavgw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgw">;
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgw_128B,VI_ftype_VIVI,2)
-// tag : V6_vnavgw_128B
-def int_hexagon_V6_vnavgw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffub,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffub
-def int_hexagon_V6_vabsdiffub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffub">;
+def int_hexagon_F2_conv_df2ud_chop :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffub_128B,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffub_128B
-def int_hexagon_V6_vabsdiffub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
+def int_hexagon_C4_or_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffuh
-def int_hexagon_V6_vabsdiffuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
+def int_hexagon_S4_vxaddsubhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffuh_128B
-def int_hexagon_V6_vabsdiffuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
+def int_hexagon_S2_vsathub :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffh,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffh
-def int_hexagon_V6_vabsdiffh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffh">;
+def int_hexagon_F2_conv_df2sf :
+Hexagon_float_double_Intrinsic<"HEXAGON_F2_conv_df2sf">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffh_128B,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffh_128B
-def int_hexagon_V6_vabsdiffh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffw,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffw
-def int_hexagon_V6_vabsdiffw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffw">;
+def int_hexagon_M2_hmmpyh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsdiffw_128B,VI_ftype_VIVI,2)
-// tag : V6_vabsdiffw_128B
-def int_hexagon_V6_vabsdiffw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
+def int_hexagon_A2_vavgwr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgub,VI_ftype_VIVI,2)
-// tag : V6_vnavgub
-def int_hexagon_V6_vnavgub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgub">;
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgub_128B,VI_ftype_VIVI,2)
-// tag : V6_vnavgub_128B
-def int_hexagon_V6_vnavgub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
+def int_hexagon_A2_sxth :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxth">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubh,VD_ftype_VIVI,2)
-// tag : V6_vaddubh
-def int_hexagon_V6_vaddubh :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh">;
+def int_hexagon_A2_sxtb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxtb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubh_128B,VD_ftype_VIVI,2)
-// tag : V6_vaddubh_128B
-def int_hexagon_V6_vaddubh_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
+def int_hexagon_C4_or_orn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_orn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububh,VD_ftype_VIVI,2)
-// tag : V6_vsububh
-def int_hexagon_V6_vsububh :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsububh">;
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsububh_128B,VD_ftype_VIVI,2)
-// tag : V6_vsububh_128B
-def int_hexagon_V6_vsububh_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsububh_128B">;
+def int_hexagon_A2_sxtw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_sxtw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhw,VD_ftype_VIVI,2)
-// tag : V6_vaddhw
-def int_hexagon_V6_vaddhw :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw">;
+def int_hexagon_M2_vabsdiffh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhw_128B,VD_ftype_VIVI,2)
-// tag : V6_vaddhw_128B
-def int_hexagon_V6_vaddhw_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhw,VD_ftype_VIVI,2)
-// tag : V6_vsubhw
-def int_hexagon_V6_vsubhw :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubhw">;
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhw_128B,VD_ftype_VIVI,2)
-// tag : V6_vsubhw_128B
-def int_hexagon_V6_vsubhw_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
+def int_hexagon_M2_hmmpyl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhw,VD_ftype_VIVI,2)
-// tag : V6_vadduhw
-def int_hexagon_V6_vadduhw :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw">;
+def int_hexagon_S2_cl1p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl1p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhw_128B,VD_ftype_VIVI,2)
-// tag : V6_vadduhw_128B
-def int_hexagon_V6_vadduhw_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
+def int_hexagon_M2_vabsdiffw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhw,VD_ftype_VIVI,2)
-// tag : V6_vsubuhw
-def int_hexagon_V6_vsubuhw :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubuhw">;
+def int_hexagon_A4_andnp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_andnp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuhw_128B,VD_ftype_VIVI,2)
-// tag : V6_vsubuhw_128B
-def int_hexagon_V6_vsubuhw_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
+def int_hexagon_C2_vmux :
+Hexagon_i64_i32i64i64_Intrinsic<"HEXAGON_C2_vmux">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vd0,VI_ftype_,0)
-// tag : V6_vd0
-def int_hexagon_V6_vd0 :
-Hexagon_v512_Intrinsic<"HEXAGON_V6_vd0">;
+def int_hexagon_S2_parityp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_S2_parityp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vd0_128B,VI_ftype_,0)
-// tag : V6_vd0_128B
-def int_hexagon_V6_vd0_128B :
-Hexagon_v1024_Intrinsic<"HEXAGON_V6_vd0_128B">;
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddbq
-def int_hexagon_V6_vaddbq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbq">;
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddbq_128B
-def int_hexagon_V6_vaddbq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubbq
-def int_hexagon_V6_vsubbq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbq">;
+def int_hexagon_F2_sfcmpeq :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubbq_128B
-def int_hexagon_V6_vsubbq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
+def int_hexagon_A2_vaddb_map :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddb_map">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbnq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddbnq
-def int_hexagon_V6_vaddbnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbnq">;
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddbnq_128B
-def int_hexagon_V6_vaddbnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
+def int_hexagon_A2_vcmpheq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpheq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbnq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubbnq
-def int_hexagon_V6_vsubbnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbnq">;
+def int_hexagon_S2_clbnorm :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clbnorm">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubbnq_128B
-def int_hexagon_V6_vsubbnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddhq
-def int_hexagon_V6_vaddhq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhq">;
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddhq_128B
-def int_hexagon_V6_vaddhq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
+def int_hexagon_S4_subaddi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subaddi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubhq
-def int_hexagon_V6_vsubhq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhq">;
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubhq_128B
-def int_hexagon_V6_vsubhq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhnq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddhnq
-def int_hexagon_V6_vaddhnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhnq">;
+def int_hexagon_S5_vasrhrnd_goodsyntax :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddhnq_128B
-def int_hexagon_V6_vaddhnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
+def int_hexagon_S2_tstbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_r">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhnq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubhnq
-def int_hexagon_V6_vsubhnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhnq">;
+def int_hexagon_S4_vrcrotate :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubhnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubhnq_128B
-def int_hexagon_V6_vsubhnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddwq
-def int_hexagon_V6_vaddwq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwq">;
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddwq_128B
-def int_hexagon_V6_vaddwq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
+def int_hexagon_S2_tstbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubwq
-def int_hexagon_V6_vsubwq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwq">;
+def int_hexagon_M2_mpy_up_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubwq_128B
-def int_hexagon_V6_vsubwq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
+def int_hexagon_S2_extractu_rp :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S2_extractu_rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwnq,VI_ftype_QVVIVI,3)
-// tag : V6_vaddwnq
-def int_hexagon_V6_vaddwnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwnq">;
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddwnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vaddwnq_128B
-def int_hexagon_V6_vaddwnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwnq,VI_ftype_QVVIVI,3)
-// tag : V6_vsubwnq
-def int_hexagon_V6_vsubwnq :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwnq">;
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubwnq_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vsubwnq_128B
-def int_hexagon_V6_vsubwnq_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsh,VI_ftype_VI,1)
-// tag : V6_vabsh
-def int_hexagon_V6_vabsh :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh">;
+def int_hexagon_M4_or_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsh_128B,VI_ftype_VI,1)
-// tag : V6_vabsh_128B
-def int_hexagon_V6_vabsh_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_128B">;
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsh_sat,VI_ftype_VI,1)
-// tag : V6_vabsh_sat
-def int_hexagon_V6_vabsh_sat :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh_sat">;
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsh_sat_128B,VI_ftype_VI,1)
-// tag : V6_vabsh_sat_128B
-def int_hexagon_V6_vabsh_sat_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsw,VI_ftype_VI,1)
-// tag : V6_vabsw
-def int_hexagon_V6_vabsw :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw">;
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsw_128B,VI_ftype_VI,1)
-// tag : V6_vabsw_128B
-def int_hexagon_V6_vabsw_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_128B">;
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsw_sat,VI_ftype_VI,1)
-// tag : V6_vabsw_sat
-def int_hexagon_V6_vabsw_sat :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw_sat">;
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsw_sat_128B,VI_ftype_VI,1)
-// tag : V6_vabsw_sat_128B
-def int_hexagon_V6_vabsw_sat_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybv,VD_ftype_VIVI,2)
-// tag : V6_vmpybv
-def int_hexagon_V6_vmpybv :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv">;
+def int_hexagon_F2_conv_w2df :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_w2df">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybv_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpybv_128B
-def int_hexagon_V6_vmpybv_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpybv_acc
-def int_hexagon_V6_vmpybv_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
+def int_hexagon_C2_cmpeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpybv_acc_128B
-def int_hexagon_V6_vmpybv_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyubv,VD_ftype_VIVI,2)
-// tag : V6_vmpyubv
-def int_hexagon_V6_vmpyubv :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv">;
+def int_hexagon_S2_vcnegh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcnegh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyubv_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpyubv_128B
-def int_hexagon_V6_vmpyubv_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
+def int_hexagon_A4_vcmpweqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpweqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyubv_acc
-def int_hexagon_V6_vmpyubv_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyubv_acc_128B
-def int_hexagon_V6_vmpyubv_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybusv,VD_ftype_VIVI,2)
-// tag : V6_vmpybusv
-def int_hexagon_V6_vmpybusv :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv">;
+def int_hexagon_M4_xor_xacc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_xor_xacc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybusv_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpybusv_128B
-def int_hexagon_V6_vmpybusv_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpybusv_acc
-def int_hexagon_V6_vmpybusv_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpybusv_acc_128B
-def int_hexagon_V6_vmpybusv_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
+def int_hexagon_A2_vavgubr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgubr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabusv,VD_ftype_VDVD,2)
-// tag : V6_vmpabusv
-def int_hexagon_V6_vmpabusv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabusv">;
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabusv_128B,VD_ftype_VDVD,2)
-// tag : V6_vmpabusv_128B
-def int_hexagon_V6_vmpabusv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuuv,VD_ftype_VDVD,2)
-// tag : V6_vmpabuuv
-def int_hexagon_V6_vmpabuuv :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabuuv">;
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuuv_128B,VD_ftype_VDVD,2)
-// tag : V6_vmpabuuv_128B
-def int_hexagon_V6_vmpabuuv_128B :
-Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
+def int_hexagon_S2_cl0p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl0p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhv,VD_ftype_VIVI,2)
-// tag : V6_vmpyhv
-def int_hexagon_V6_vmpyhv :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv">;
+def int_hexagon_S2_valignib :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignib">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhv_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpyhv_128B
-def int_hexagon_V6_vmpyhv_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
+def int_hexagon_F2_sffixupd :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupd">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyhv_acc
-def int_hexagon_V6_vmpyhv_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyhv_acc_128B
-def int_hexagon_V6_vmpyhv_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhv,VD_ftype_VIVI,2)
-// tag : V6_vmpyuhv
-def int_hexagon_V6_vmpyuhv :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv">;
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpyuhv_128B
-def int_hexagon_V6_vmpyuhv_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyuhv_acc
-def int_hexagon_V6_vmpyuhv_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
+def int_hexagon_S2_ct1 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyuhv_acc_128B
-def int_hexagon_V6_vmpyuhv_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
+def int_hexagon_S2_ct0 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs,VI_ftype_VIVI,2)
-// tag : V6_vmpyhvsrs
-def int_hexagon_V6_vmpyhvsrs :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyhvsrs_128B
-def int_hexagon_V6_vmpyhvsrs_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhus,VD_ftype_VIVI,2)
-// tag : V6_vmpyhus
-def int_hexagon_V6_vmpyhus :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus">;
+def int_hexagon_S4_ntstbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_i">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhus_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpyhus_128B
-def int_hexagon_V6_vmpyhus_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
+def int_hexagon_F2_sffixupr :
+Hexagon_float_float_Intrinsic<"HEXAGON_F2_sffixupr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyhus_acc
-def int_hexagon_V6_vmpyhus_acc :
-Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
+def int_hexagon_S2_asr_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyhus_acc_128B
-def int_hexagon_V6_vmpyhus_acc_128B :
-Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyih,VI_ftype_VIVI,2)
-// tag : V6_vmpyih
-def int_hexagon_V6_vmpyih :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih">;
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyih_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyih_128B
-def int_hexagon_V6_vmpyih_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
+def int_hexagon_A2_vcmphgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgtu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyih_acc
-def int_hexagon_V6_vmpyih_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
+def int_hexagon_C2_andn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_andn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyih_acc_128B
-def int_hexagon_V6_vmpyih_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyewuh,VI_ftype_VIVI,2)
-// tag : V6_vmpyewuh
-def int_hexagon_V6_vmpyewuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh">;
+def int_hexagon_S4_addaddi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addaddi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyewuh_128B
-def int_hexagon_V6_vmpyewuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh,VI_ftype_VIVI,2)
-// tag : V6_vmpyowh
-def int_hexagon_V6_vmpyowh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh">;
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyowh_128B
-def int_hexagon_V6_vmpyowh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
+def int_hexagon_A4_rcmpeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd,VI_ftype_VIVI,2)
-// tag : V6_vmpyowh_rnd
-def int_hexagon_V6_vmpyowh_rnd :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
+def int_hexagon_M4_xor_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyowh_rnd_128B
-def int_hexagon_V6_vmpyowh_rnd_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyowh_sacc
-def int_hexagon_V6_vmpyowh_sacc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyowh_sacc_128B
-def int_hexagon_V6_vmpyowh_sacc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyowh_rnd_sacc
-def int_hexagon_V6_vmpyowh_rnd_sacc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
+def int_hexagon_A4_round_ri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyowh_rnd_sacc_128B
-def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
+def int_hexagon_A2_max :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_max">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyieoh,VI_ftype_VIVI,2)
-// tag : V6_vmpyieoh
-def int_hexagon_V6_vmpyieoh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyieoh">;
+def int_hexagon_A4_round_rr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyieoh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyieoh_128B
-def int_hexagon_V6_vmpyieoh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
+def int_hexagon_A4_combineii :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineii">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh,VI_ftype_VIVI,2)
-// tag : V6_vmpyiewuh
-def int_hexagon_V6_vmpyiewuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
+def int_hexagon_A4_combineir :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineir">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyiewuh_128B
-def int_hexagon_V6_vmpyiewuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
+def int_hexagon_C4_and_orn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_orn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiowh,VI_ftype_VIVI,2)
-// tag : V6_vmpyiowh
-def int_hexagon_V6_vmpyiowh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiowh">;
+def int_hexagon_M5_vmacbuu :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbuu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiowh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmpyiowh_128B
-def int_hexagon_V6_vmpyiowh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
+def int_hexagon_A4_rcmpeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyiewh_acc
-def int_hexagon_V6_vmpyiewh_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
+def int_hexagon_M4_cmpyr_whc :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyiewh_acc_128B
-def int_hexagon_V6_vmpyiewh_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyiewuh_acc
-def int_hexagon_V6_vmpyiewuh_acc :
-Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
+def int_hexagon_S2_vzxtbh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxtbh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc_128B,VI_ftype_VIVIVI,3)
-// tag : V6_vmpyiewuh_acc_128B
-def int_hexagon_V6_vmpyiewuh_acc_128B :
-Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyub,VD_ftype_VISI,2)
-// tag : V6_vmpyub
-def int_hexagon_V6_vmpyub :
-Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub">;
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyub_128B,VD_ftype_VISI,2)
-// tag : V6_vmpyub_128B
-def int_hexagon_V6_vmpyub_128B :
-Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
+def int_hexagon_A2_combinew :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combinew">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc,VD_ftype_VDVISI,3)
-// tag : V6_vmpyub_acc
-def int_hexagon_V6_vmpyub_acc :
-Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc_128B,VD_ftype_VDVISI,3)
-// tag : V6_vmpyub_acc_128B
-def int_hexagon_V6_vmpyub_acc_128B :
-Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybus,VD_ftype_VISI,2)
-// tag : V6_vmpybus
-def int_hexagon_V6_vmpybus :
-Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus">;
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybus_128B,VD_ftype_VISI,2)
-// tag : V6_vmpybus_128B
-def int_hexagon_V6_vmpybus_128B :
-Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc,VD_ftype_VDVISI,3)
-// tag : V6_vmpybus_acc
-def int_hexagon_V6_vmpybus_acc :
-Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
+def int_hexagon_S4_ori_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc_128B,VD_ftype_VDVISI,3)
-// tag : V6_vmpybus_acc_128B
-def int_hexagon_V6_vmpybus_acc_128B :
-Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
+def int_hexagon_C4_nbitsset :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsset">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabus,VD_ftype_VDSI,2)
-// tag : V6_vmpabus
-def int_hexagon_V6_vmpabus :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus">;
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabus_128B,VD_ftype_VDSI,2)
-// tag : V6_vmpabus_128B
-def int_hexagon_V6_vmpabus_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vmpabus_acc
-def int_hexagon_V6_vmpabus_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vmpabus_acc_128B
-def int_hexagon_V6_vmpabus_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahb,VD_ftype_VDSI,2)
-// tag : V6_vmpahb
-def int_hexagon_V6_vmpahb :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb">;
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahb_128B,VD_ftype_VDSI,2)
-// tag : V6_vmpahb_128B
-def int_hexagon_V6_vmpahb_128B :
-Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vmpahb_acc
-def int_hexagon_V6_vmpahb_acc :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
+def int_hexagon_A4_modwrapu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_modwrapu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vmpahb_acc_128B
-def int_hexagon_V6_vmpahb_acc_128B :
-Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
+def int_hexagon_A4_rcmpneq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyh,VD_ftype_VISI,2)
-// tag : V6_vmpyh
-def int_hexagon_V6_vmpyh :
-Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyh">;
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyh_128B,VD_ftype_VISI,2)
-// tag : V6_vmpyh_128B
-def int_hexagon_V6_vmpyh_128B :
-Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc,VD_ftype_VDVISI,3)
-// tag : V6_vmpyhsat_acc
-def int_hexagon_V6_vmpyhsat_acc :
-Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
+def int_hexagon_F2_sfimm_p :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc_128B,VD_ftype_VDVISI,3)
-// tag : V6_vmpyhsat_acc_128B
-def int_hexagon_V6_vmpyhsat_acc_128B :
-Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
+def int_hexagon_F2_sfimm_n :
+Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_n">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhss,VI_ftype_VISI,2)
-// tag : V6_vmpyhss
-def int_hexagon_V6_vmpyhss :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhss">;
+def int_hexagon_M4_cmpyr_wh :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhss_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyhss_128B
-def int_hexagon_V6_vmpyhss_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs,VI_ftype_VISI,2)
-// tag : V6_vmpyhsrs
-def int_hexagon_V6_vmpyhsrs :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
+def int_hexagon_A2_vavgub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyhsrs_128B
-def int_hexagon_V6_vmpyhsrs_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
+def int_hexagon_F2_conv_d2sf :
+Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_d2sf">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuh,VD_ftype_VISI,2)
-// tag : V6_vmpyuh
-def int_hexagon_V6_vmpyuh :
-Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh">;
+def int_hexagon_A2_vavguh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuh_128B,VD_ftype_VISI,2)
-// tag : V6_vmpyuh_128B
-def int_hexagon_V6_vmpyuh_128B :
-Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
+def int_hexagon_A4_cmpbeqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeqi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc,VD_ftype_VDVISI,3)
-// tag : V6_vmpyuh_acc
-def int_hexagon_V6_vmpyuh_acc :
-Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
+def int_hexagon_F2_sfcmpuo :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpuo">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc_128B,VD_ftype_VDVISI,3)
-// tag : V6_vmpyuh_acc_128B
-def int_hexagon_V6_vmpyuh_acc_128B :
-Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
+def int_hexagon_A2_vavguw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyihb,VI_ftype_VISI,2)
-// tag : V6_vmpyihb
-def int_hexagon_V6_vmpyihb :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb">;
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyihb_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyihb_128B
-def int_hexagon_V6_vmpyihb_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc,VI_ftype_VIVISI,3)
-// tag : V6_vmpyihb_acc
-def int_hexagon_V6_vmpyihb_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vmpyihb_acc_128B
-def int_hexagon_V6_vmpyihb_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwb,VI_ftype_VISI,2)
-// tag : V6_vmpyiwb
-def int_hexagon_V6_vmpyiwb :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb">;
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyiwb_128B
-def int_hexagon_V6_vmpyiwb_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
+def int_hexagon_A2_minu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_minu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwb_acc
-def int_hexagon_V6_vmpyiwb_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwb_acc_128B
-def int_hexagon_V6_vmpyiwb_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
+def int_hexagon_M4_or_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_andn">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwh,VI_ftype_VISI,2)
-// tag : V6_vmpyiwh
-def int_hexagon_V6_vmpyiwh :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh">;
+def int_hexagon_A2_minp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyiwh_128B
-def int_hexagon_V6_vmpyiwh_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
+def int_hexagon_S4_or_andix :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andix">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwh_acc
-def int_hexagon_V6_vmpyiwh_acc :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwh_acc_128B
-def int_hexagon_V6_vmpyiwh_acc_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vand,VI_ftype_VIVI,2)
-// tag : V6_vand
-def int_hexagon_V6_vand :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vand">;
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vand_128B,VI_ftype_VIVI,2)
-// tag : V6_vand_128B
-def int_hexagon_V6_vand_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vand_128B">;
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vor,VI_ftype_VIVI,2)
-// tag : V6_vor
-def int_hexagon_V6_vor :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vor">;
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vor_128B,VI_ftype_VIVI,2)
-// tag : V6_vor_128B
-def int_hexagon_V6_vor_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vor_128B">;
+def int_hexagon_F2_sfcmpge :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpge">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vxor,VI_ftype_VIVI,2)
-// tag : V6_vxor
-def int_hexagon_V6_vxor :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vxor">;
+def int_hexagon_F2_sfmin :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmin">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vxor_128B,VI_ftype_VIVI,2)
-// tag : V6_vxor_128B
-def int_hexagon_V6_vxor_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vxor_128B">;
+def int_hexagon_F2_sfcmpgt :
+Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpgt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnot,VI_ftype_VI,1)
-// tag : V6_vnot
-def int_hexagon_V6_vnot :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnot">;
+def int_hexagon_M4_vpmpyh :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_vpmpyh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnot_128B,VI_ftype_VI,1)
-// tag : V6_vnot_128B
-def int_hexagon_V6_vnot_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnot_128B">;
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandqrt,VI_ftype_QVSI,2)
-// tag : V6_vandqrt
-def int_hexagon_V6_vandqrt :
-Hexagon_v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt">;
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandqrt_128B,VI_ftype_QVSI,2)
-// tag : V6_vandqrt_128B
-def int_hexagon_V6_vandqrt_128B :
-Hexagon_v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc,VI_ftype_VIQVSI,3)
-// tag : V6_vandqrt_acc
-def int_hexagon_V6_vandqrt_acc :
-Hexagon_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
+def int_hexagon_A2_roundsat :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_roundsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc_128B,VI_ftype_VIQVSI,3)
-// tag : V6_vandqrt_acc_128B
-def int_hexagon_V6_vandqrt_acc_128B :
-Hexagon_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
+def int_hexagon_S2_ct1p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct1p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvrt,QV_ftype_VISI,2)
-// tag : V6_vandvrt
-def int_hexagon_V6_vandvrt :
-Hexagon_v64iv512i_Intrinsic<"HEXAGON_V6_vandvrt">;
+def int_hexagon_S4_extract_rp :
+Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S4_extract_rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvrt_128B,QV_ftype_VISI,2)
-// tag : V6_vandvrt_128B
-def int_hexagon_V6_vandvrt_128B :
-Hexagon_v128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc,QV_ftype_QVVISI,3)
-// tag : V6_vandvrt_acc
-def int_hexagon_V6_vandvrt_acc :
-Hexagon_v64iv64iv512i_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
+def int_hexagon_C4_cmplteui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteui">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc_128B,QV_ftype_QVVISI,3)
-// tag : V6_vandvrt_acc_128B
-def int_hexagon_V6_vandvrt_acc_128B :
-Hexagon_v128iv128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
+def int_hexagon_S4_addi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_lsr_ri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw,QV_ftype_VIVI,2)
-// tag : V6_vgtw
-def int_hexagon_V6_vgtw :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtw">;
+def int_hexagon_A4_tfrcpp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A4_tfrcpp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_128B,QV_ftype_VIVI,2)
-// tag : V6_vgtw_128B
-def int_hexagon_V6_vgtw_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_128B">;
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_i_svw_trun">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_and
-def int_hexagon_V6_vgtw_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_and">;
+def int_hexagon_A4_cmphgti :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgti">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_and_128B
-def int_hexagon_V6_vgtw_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
+def int_hexagon_A4_vrminh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_or
-def int_hexagon_V6_vgtw_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_or">;
+def int_hexagon_A4_vrminw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_or_128B
-def int_hexagon_V6_vgtw_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
+def int_hexagon_A4_cmphgtu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_xor
-def int_hexagon_V6_vgtw_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_xor">;
+def int_hexagon_S2_insertp_rp :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_S2_insertp_rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtw_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtw_xor_128B
-def int_hexagon_V6_vgtw_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
+def int_hexagon_A2_vnavghcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghcr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw,QV_ftype_VIVI,2)
-// tag : V6_veqw
-def int_hexagon_V6_veqw :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqw">;
+def int_hexagon_S4_subi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_asl_ri">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_128B,QV_ftype_VIVI,2)
-// tag : V6_veqw_128B
-def int_hexagon_V6_veqw_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_128B">;
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_and,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_and
-def int_hexagon_V6_veqw_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_and">;
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_and_128B
-def int_hexagon_V6_veqw_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
+def int_hexagon_A2_vsubws :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubws">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_or,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_or
-def int_hexagon_V6_veqw_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_or">;
+def int_hexagon_A2_sath :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sath">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_or_128B
-def int_hexagon_V6_veqw_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
+def int_hexagon_S2_asl_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_xor,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_xor
-def int_hexagon_V6_veqw_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_xor">;
+def int_hexagon_A2_satb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqw_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqw_xor_128B
-def int_hexagon_V6_veqw_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
+def int_hexagon_C2_cmpltu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpltu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth,QV_ftype_VIVI,2)
-// tag : V6_vgth
-def int_hexagon_V6_vgth :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgth">;
+def int_hexagon_S2_insertp :
+Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S2_insertp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_128B,QV_ftype_VIVI,2)
-// tag : V6_vgth_128B
-def int_hexagon_V6_vgth_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_128B">;
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_and
-def int_hexagon_V6_vgth_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_and">;
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_and_128B
-def int_hexagon_V6_vgth_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_nac">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_or
-def int_hexagon_V6_vgth_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_or">;
+def int_hexagon_S2_extractup_rp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_extractup_rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_or_128B
-def int_hexagon_V6_vgth_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
+def int_hexagon_S4_vxaddsubw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_xor
-def int_hexagon_V6_vgth_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_xor">;
+def int_hexagon_S4_vxaddsubh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgth_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgth_xor_128B
-def int_hexagon_V6_vgth_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
+def int_hexagon_A2_asrh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_asrh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh,QV_ftype_VIVI,2)
-// tag : V6_veqh
-def int_hexagon_V6_veqh :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqh">;
+def int_hexagon_S4_extractp_rp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_extractp_rp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_128B,QV_ftype_VIVI,2)
-// tag : V6_veqh_128B
-def int_hexagon_V6_veqh_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_128B">;
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_and,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_and
-def int_hexagon_V6_veqh_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_and">;
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_and_128B
-def int_hexagon_V6_veqh_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_or,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_or
-def int_hexagon_V6_veqh_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_or">;
+def int_hexagon_C2_or :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_or_128B
-def int_hexagon_V6_veqh_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_xor,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_xor
-def int_hexagon_V6_veqh_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_xor">;
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqh_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqh_xor_128B
-def int_hexagon_V6_veqh_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
+def int_hexagon_A2_xor :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb,QV_ftype_VIVI,2)
-// tag : V6_vgtb
-def int_hexagon_V6_vgtb :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtb">;
+def int_hexagon_A2_add :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_add">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_128B,QV_ftype_VIVI,2)
-// tag : V6_vgtb_128B
-def int_hexagon_V6_vgtb_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_128B">;
+def int_hexagon_A2_vsububs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsububs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_and
-def int_hexagon_V6_vgtb_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_and">;
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_and_128B
-def int_hexagon_V6_vgtb_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_or
-def int_hexagon_V6_vgtb_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_or">;
+def int_hexagon_A2_vraddub_acc :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vraddub_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_or_128B
-def int_hexagon_V6_vgtb_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
+def int_hexagon_F2_sfinvsqrta :
+Hexagon_floati32_float_Intrinsic<"HEXAGON_F2_sfinvsqrta">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_xor
-def int_hexagon_V6_vgtb_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_xor">;
+def int_hexagon_S2_ct0p :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct0p">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtb_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtb_xor_128B
-def int_hexagon_V6_vgtb_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
+def int_hexagon_A2_svaddh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb,QV_ftype_VIVI,2)
-// tag : V6_veqb
-def int_hexagon_V6_veqb :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqb">;
+def int_hexagon_S2_vcrotate :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcrotate">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_128B,QV_ftype_VIVI,2)
-// tag : V6_veqb_128B
-def int_hexagon_V6_veqb_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_128B">;
+def int_hexagon_A2_aslh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_aslh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_and,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_and
-def int_hexagon_V6_veqb_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_and">;
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_and_128B
-def int_hexagon_V6_veqb_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
+
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
+
+def int_hexagon_S2_asr_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_p">;
+
+def int_hexagon_S2_vsplatrh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsplatrh">;
+
+def int_hexagon_S2_asr_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r">;
+
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
+
+def int_hexagon_S2_vsplatrb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_vsplatrb">;
+
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
+
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
+
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
+
+def int_hexagon_C2_muxri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxri">;
+
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
+
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
+
+def int_hexagon_C2_pxfer_map :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_pxfer_map">;
+
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
+
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
+
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_or">;
+
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
+
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
+
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
+
+def int_hexagon_A2_vaddw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddw">;
+
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_and">;
+
+def int_hexagon_A2_vaddh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddh">;
+
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
+
+def int_hexagon_C2_cmpeqp :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpeqp">;
+
+def int_hexagon_M4_mpyri_addi :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addi">;
+
+def int_hexagon_A2_not :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_not">;
+
+def int_hexagon_S4_andi_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_lsr_ri">;
+
+def int_hexagon_M2_macsip :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsip">;
+
+def int_hexagon_A2_tfrcrr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrcrr">;
+
+def int_hexagon_M2_macsin :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsin">;
+
+def int_hexagon_C2_orn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_orn">;
+
+def int_hexagon_M4_and_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_andn">;
+
+def int_hexagon_F2_sfmpy :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmpy">;
+
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
+
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
+
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
+
+def int_hexagon_S2_asr_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vw">;
+
+def int_hexagon_M4_and_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_or">;
+
+def int_hexagon_S2_asr_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vh">;
+
+def int_hexagon_C2_mask :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_C2_mask">;
+
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
+
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
+
+def int_hexagon_M2_mpy_up_s1_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
+
+def int_hexagon_A4_vcmpbgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbgt">;
+
+def int_hexagon_M5_vrmacbsu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbsu">;
+
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
+
+def int_hexagon_A2_vrsadub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vrsadub">;
+
+def int_hexagon_A2_tfrrcr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrrcr">;
+
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
+
+def int_hexagon_F2_dfcmpge :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpge">;
+
+def int_hexagon_M2_accii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_accii">;
+
+def int_hexagon_A5_vaddhubs :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A5_vaddhubs">;
+
+def int_hexagon_A2_vmaxw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxw">;
+
+def int_hexagon_A2_vmaxb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxb">;
+
+def int_hexagon_A2_vmaxh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxh">;
+
+def int_hexagon_S2_vsxthw :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxthw">;
+
+def int_hexagon_S4_andi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_asl_ri">;
+
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_nac">;
+
+def int_hexagon_S2_lsl_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
+
+def int_hexagon_C2_cmpgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgt">;
+
+def int_hexagon_F2_conv_df2d_chop :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
+
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
+
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
+
+def int_hexagon_F2_conv_sf2w :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w">;
+
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
+
+def int_hexagon_F2_sfclass :
+Hexagon_i32_floati32_Intrinsic<"HEXAGON_F2_sfclass">;
+
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
+
+def int_hexagon_M4_xor_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_andn">;
+
+def int_hexagon_S2_addasl_rrri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_addasl_rrri">;
+
+def int_hexagon_M5_vdmpybsu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vdmpybsu">;
+
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
+
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
+
+def int_hexagon_A2_addi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addi">;
+
+def int_hexagon_A2_addp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addp">;
+
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
+
+def int_hexagon_S4_clbpnorm :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S4_clbpnorm">;
+
+def int_hexagon_A4_round_rr_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr_sat">;
+
+def int_hexagon_M2_nacci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_nacci">;
+
+def int_hexagon_S2_shuffeh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeh">;
+
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_and">;
+
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
+
+def int_hexagon_F2_conv_sf2uw :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
+
+def int_hexagon_A2_vsubh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubh">;
+
+def int_hexagon_F2_conv_sf2ud :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
+
+def int_hexagon_A2_vsubw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubw">;
+
+def int_hexagon_A2_vcmpwgt :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgt">;
+
+def int_hexagon_M4_xor_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_or">;
+
+def int_hexagon_F2_conv_sf2uw_chop :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
+
+def int_hexagon_S2_asl_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vw">;
+
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
+
+def int_hexagon_S2_asl_r_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vh">;
+
+def int_hexagon_A2_svsubuhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubuhs">;
+
+def int_hexagon_M5_vmpybsu :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybsu">;
+
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
+
+def int_hexagon_C4_and_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_and">;
+
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
+
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
+
+def int_hexagon_S2_lsr_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p">;
+
+def int_hexagon_S2_lsr_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r">;
+
+def int_hexagon_A4_subp_c :
+Hexagon_i64i32_i64i64i32_Intrinsic<"HEXAGON_A4_subp_c">;
+
+def int_hexagon_A2_vsubhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubhs">;
+
+def int_hexagon_C2_vitpack :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_vitpack">;
+
+def int_hexagon_A2_vavguhr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguhr">;
+
+def int_hexagon_S2_vsplicerb :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vsplicerb">;
+
+def int_hexagon_C4_nbitsclr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclr">;
+
+def int_hexagon_A2_vcmpbgtu :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
+
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s1">;
+
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s0">;
+
+def int_hexagon_F2_dfcmpuo :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpuo">;
+
+def int_hexagon_S2_shuffob :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffob">;
+
+def int_hexagon_C2_and :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_and">;
+
+def int_hexagon_S5_popcountp :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S5_popcountp">;
+
+def int_hexagon_S4_extractp :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_extractp">;
+
+def int_hexagon_S2_cl0 :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl0">;
+
+def int_hexagon_A4_vcmpbgti :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgti">;
+
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s1">;
+
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s0">;
+
+def int_hexagon_C4_cmpneq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneq">;
+
+def int_hexagon_M2_vmac2es :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es">;
+
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
+
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
+
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
+
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
+
+def int_hexagon_S2_clb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clb">;
+
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
+
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
+
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
+
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
+
+def int_hexagon_M2_maci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_maci">;
+
+def int_hexagon_A2_vmaxuh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuh">;
+
+def int_hexagon_A4_bitspliti :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitspliti">;
+
+def int_hexagon_A2_vmaxub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxub">;
+
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
+
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
+
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrmac_s0">;
+
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
+
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
+
+def int_hexagon_F2_conv_sf2d :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d">;
+
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
+
+def int_hexagon_F2_dfimm_n :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_n">;
+
+def int_hexagon_A4_cmphgt :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgt">;
+
+def int_hexagon_F2_dfimm_p :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_p">;
+
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
+
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
+
+def int_hexagon_M4_mpyri_addr_u2 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr_u2">;
+
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
+
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
+
+def int_hexagon_M5_vrmacbuu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbuu">;
+
+def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax">;
+
+def int_hexagon_S2_vspliceib :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vspliceib">;
+
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
+
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s1">;
+
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s0">;
+
+def int_hexagon_A2_maxu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_maxu">;
+
+def int_hexagon_A2_maxp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxp">;
+
+def int_hexagon_A2_andir :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_andir">;
+
+def int_hexagon_F2_sfrecipa :
+Hexagon_floati32_floatfloat_Intrinsic<"HEXAGON_F2_sfrecipa">;
+
+def int_hexagon_A2_combineii :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combineii">;
+
+def int_hexagon_A4_orn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_orn">;
+
+def int_hexagon_A4_cmpbgtui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtui">;
+
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
+
+def int_hexagon_A4_vcmpbeqi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbeqi">;
+
+def int_hexagon_S2_lsl_r_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r">;
+
+def int_hexagon_S2_lsl_r_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p">;
+
+def int_hexagon_A2_or :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_or">;
+
+def int_hexagon_F2_dfcmpeq :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpeq">;
+
+def int_hexagon_C2_cmpeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeq">;
+
+def int_hexagon_A2_tfrp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_tfrp">;
+
+def int_hexagon_C4_and_andn :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_andn">;
+
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
+
+def int_hexagon_A2_satuh :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satuh">;
+
+def int_hexagon_A2_satub :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satub">;
+
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
+
+def int_hexagon_S4_or_ori :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_ori">;
+
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
+
+def int_hexagon_A2_tfrih :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfrih">;
+
+def int_hexagon_A2_tfril :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfril">;
+
+def int_hexagon_M4_mpyri_addr :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr">;
+
+def int_hexagon_S2_vtrunehb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunehb">;
+
+def int_hexagon_A2_vabsw :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsw">;
+
+def int_hexagon_A2_vabsh :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsh">;
+
+def int_hexagon_F2_sfsub :
+Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfsub">;
+
+def int_hexagon_C2_muxii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxii">;
+
+def int_hexagon_C2_muxir :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxir">;
+
+def int_hexagon_A2_swiz :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_swiz">;
+
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_and">;
+
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
+
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
+
+def int_hexagon_A2_vraddub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vraddub">;
+
+def int_hexagon_A4_tlbmatch :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_tlbmatch">;
+
+def int_hexagon_F2_conv_df2w_chop :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
+
+def int_hexagon_A2_and :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_and">;
+
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
+
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
+
+def int_hexagon_S4_extract :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_extract">;
+
+def int_hexagon_A2_vcmpweq :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpweq">;
+
+def int_hexagon_M2_acci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_acci">;
+
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_acc">;
+
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_or">;
+
+def int_hexagon_F2_conv_ud2sf :
+Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
+
+def int_hexagon_A2_tfr :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfr">;
+
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_or">;
+
+def int_hexagon_A2_subri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subri">;
+
+def int_hexagon_A4_vrmaxuw :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuw">;
+
+def int_hexagon_M5_vmpybuu :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybuu">;
+
+def int_hexagon_A4_vrmaxuh :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuh">;
+
+def int_hexagon_S2_asl_i_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vw">;
+
+def int_hexagon_A2_vavgw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgw">;
+
+def int_hexagon_S2_brev :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_brev">;
+
+def int_hexagon_A2_vavgh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgh">;
+
+def int_hexagon_S2_clrbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_i">;
+
+def int_hexagon_S2_asl_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vh">;
+
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_or">;
+
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
+
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
+
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
+
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
+
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
+
+def int_hexagon_M2_naccii :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_naccii">;
+
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
+
+def int_hexagon_S2_vtrunewh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunewh">;
+
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
+
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
+
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
+
+def int_hexagon_M4_mac_up_s1_sat :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
+
+def int_hexagon_S4_vrcrotate_acc :
+Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate_acc">;
+
+def int_hexagon_F2_conv_uw2df :
+Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_uw2df">;
+
+def int_hexagon_A2_vaddubs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddubs">;
+
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
+
+def int_hexagon_A2_orir :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_orir">;
+
+def int_hexagon_A2_andp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_andp">;
+
+def int_hexagon_S2_lfsp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_lfsp">;
+
+def int_hexagon_A2_min :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_min">;
+
+def int_hexagon_M2_mpysmi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysmi">;
+
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
+
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
+
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
+
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
+
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
+
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
+
+def int_hexagon_F2_conv_sf2df :
+Hexagon_double_float_Intrinsic<"HEXAGON_F2_conv_sf2df">;
+
+def int_hexagon_S2_vtrunohb :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunohb">;
+
+def int_hexagon_F2_conv_sf2d_chop :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
+
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
+
+def int_hexagon_F2_conv_df2w :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w">;
+
+def int_hexagon_S5_asrhub_sat :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_sat">;
+
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_xacc">;
+
+def int_hexagon_F2_conv_df2d :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d">;
+
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
+
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
+
+def int_hexagon_A2_svadduhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svadduhs">;
+
+def int_hexagon_F2_conv_sf2w_chop :
+Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
+
+def int_hexagon_S2_svsathub :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathub">;
+
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
+
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
+
+def int_hexagon_S2_setbit_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_r">;
+
+def int_hexagon_A2_vavghr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghr">;
+
+def int_hexagon_F2_sffma_sc :
+Hexagon_float_floatfloatfloati32_Intrinsic<"HEXAGON_F2_sffma_sc">;
+
+def int_hexagon_F2_dfclass :
+Hexagon_i32_doublei32_Intrinsic<"HEXAGON_F2_dfclass">;
+
+def int_hexagon_F2_conv_df2ud :
+Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud">;
+
+def int_hexagon_F2_conv_df2uw :
+Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw">;
+
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
+
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
+
+def int_hexagon_C4_cmpltei :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpltei">;
+
+def int_hexagon_C4_cmplteu :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteu">;
+
+def int_hexagon_A2_vsubb_map :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubb_map">;
+
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
+
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd">;
+
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
+
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
+
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
+
+def int_hexagon_A2_minup :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minup">;
+
+def int_hexagon_S2_valignrb :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignrb">;
+
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
+
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
+
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
+
+def int_hexagon_A2_vaddub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddub">;
+
+def int_hexagon_A2_combine_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_lh">;
+
+def int_hexagon_M5_vdmacbsu :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vdmacbsu">;
+
+def int_hexagon_A2_combine_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_ll">;
+
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
+
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
+
+def int_hexagon_S2_asr_i_p_rnd :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd">;
+
+def int_hexagon_A2_addpsat :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addpsat">;
+
+def int_hexagon_A2_svaddhs :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddhs">;
+
+def int_hexagon_S4_ori_lsr_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_lsr_ri">;
+
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
+
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
+
+def int_hexagon_A2_vminw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminw">;
+
+def int_hexagon_A2_vminh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminh">;
+
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
+
+def int_hexagon_A2_vminb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminb">;
+
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
+
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
+
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
+
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
+
+def int_hexagon_S4_lsli :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_lsli">;
+
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
+
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
+
+def int_hexagon_M4_vrmpyeh_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
+
+def int_hexagon_M4_vrmpyeh_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
+
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
+
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
+
+def int_hexagon_M2_vraddh :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vraddh">;
+
+def int_hexagon_C2_tfrrp :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrrp">;
+
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
+
+def int_hexagon_S2_vtrunowh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunowh">;
+
+def int_hexagon_A2_abs :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abs">;
+
+def int_hexagon_A4_cmpbeq :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeq">;
+
+def int_hexagon_A2_negp :
+Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_negp">;
+
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_sat">;
+
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
+
+def int_hexagon_S2_vsatwuh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwuh">;
+
+def int_hexagon_F2_dfcmpgt :
+Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpgt">;
+
+def int_hexagon_S2_svsathb :
+Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathb">;
+
+def int_hexagon_C2_cmpgtup :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtup">;
+
+def int_hexagon_A4_cround_ri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_ri">;
+
+def int_hexagon_S4_clbpaddi :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S4_clbpaddi">;
+
+def int_hexagon_A4_cround_rr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_rr">;
+
+def int_hexagon_C2_mux :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_mux">;
+
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
+
+def int_hexagon_S2_shuffeb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeb">;
+
+def int_hexagon_A2_vminuw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuw">;
+
+def int_hexagon_A2_vaddhs :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddhs">;
+
+def int_hexagon_S2_insert_rp :
+Hexagon_i32_i32i32i64_Intrinsic<"HEXAGON_S2_insert_rp">;
+
+def int_hexagon_A2_vminuh :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuh">;
+
+def int_hexagon_A2_vminub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminub">;
+
+def int_hexagon_S2_extractu :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_extractu">;
+
+def int_hexagon_A2_svsubh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubh">;
+
+def int_hexagon_S4_clbaddi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_clbaddi">;
+
+def int_hexagon_F2_sffms :
+Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms">;
+
+def int_hexagon_S2_vsxtbh :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxtbh">;
+
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
+
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
+
+def int_hexagon_A2_subp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_subp">;
+
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
+
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
+
+def int_hexagon_S4_parity :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_parity">;
+
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
+
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
+
+def int_hexagon_S4_addi_asl_ri :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_asl_ri">;
+
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
+
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
+
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_nac">;
+
+def int_hexagon_A4_cmpheqi :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheqi">;
+
+def int_hexagon_S2_lsr_r_p_xor :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
+
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
+
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
+
+def int_hexagon_F2_conv_sf2ud_chop :
+Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
+
+def int_hexagon_C2_cmpgeui :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgeui">;
+
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
+
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
+
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
+
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
+
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
+
+def int_hexagon_M4_nac_up_s1_sat :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
+
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
+
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
+
+def int_hexagon_A4_round_ri_sat :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri_sat">;
+
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
+
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
+
+def int_hexagon_A2_vavghcr :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghcr">;
+
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
+
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
+
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmaci_s0">;
+
+def int_hexagon_S2_setbit_i :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_i">;
+
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_or">;
+
+def int_hexagon_A4_andn :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_andn">;
+
+def int_hexagon_M5_vrmpybsu :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybsu">;
+
+def int_hexagon_S2_vrndpackwh :
+Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwh">;
+
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
+
+def int_hexagon_A2_vmaxuw :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuw">;
+
+def int_hexagon_C2_bitsclr :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclr">;
+
+def int_hexagon_M2_xor_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_xor_xacc">;
+
+def int_hexagon_A4_vcmpbgtui :
+Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgtui">;
+
+def int_hexagon_A4_ornp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_ornp">;
+
+def int_hexagon_A2_tfrpi :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_tfrpi">;
+
+def int_hexagon_C4_and_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_or">;
+
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
+
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
+
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
+
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
+
+def int_hexagon_M2_vmpy2su_s1 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
+
+def int_hexagon_M2_vmpy2su_s0 :
+Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
+
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_acc">;
+
+def int_hexagon_C4_nbitsclri :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclri">;
+
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vh">;
+
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc">;
+
+// V55 Scalar Instructions.
+
+def int_hexagon_A5_ACS :
+Hexagon_i64i32_i64i64i64_Intrinsic<"HEXAGON_A5_ACS">;
+
+// V60 Scalar Instructions.
+
+def int_hexagon_S6_rol_i_p_and :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_and">;
+
+def int_hexagon_S6_rol_i_r_xacc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_xacc">;
+
+def int_hexagon_S6_rol_i_r_and :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_and">;
+
+def int_hexagon_S6_rol_i_r_acc :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_acc">;
+
+def int_hexagon_S6_rol_i_p_xacc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_xacc">;
+
+def int_hexagon_S6_rol_i_p :
+Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S6_rol_i_p">;
+
+def int_hexagon_S6_rol_i_p_nac :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_nac">;
+
+def int_hexagon_S6_rol_i_p_acc :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_acc">;
+
+def int_hexagon_S6_rol_i_r_or :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_or">;
+
+def int_hexagon_S6_rol_i_r :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S6_rol_i_r">;
+
+def int_hexagon_S6_rol_i_r_nac :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_nac">;
+
+def int_hexagon_S6_rol_i_p_or :
+Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_or">;
+
+// V62 Scalar Instructions.
+
+def int_hexagon_S6_vtrunehb_ppp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
+
+def int_hexagon_V6_ldntnt0 :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldntnt0">;
+
+def int_hexagon_M6_vabsdiffub :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffub">;
+
+def int_hexagon_S6_vtrunohb_ppp :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
+
+def int_hexagon_M6_vabsdiffb :
+Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffb">;
+
+def int_hexagon_A6_vminub_RdP :
+Hexagon_i64i32_i64i64_Intrinsic<"HEXAGON_A6_vminub_RdP">;
+
+def int_hexagon_S6_vsplatrbp :
+Hexagon_i64_i32_Intrinsic<"HEXAGON_S6_vsplatrbp">;
+
+// V65 Scalar Instructions.
+
+def int_hexagon_A6_vcmpbeq_notany :
+Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;
+
+// V66 Scalar Instructions.
+
+def int_hexagon_F2_dfsub :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfsub">;
+
+def int_hexagon_F2_dfadd :
+Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfadd">;
+
+def int_hexagon_M2_mnaci :
+Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mnaci">;
+
+def int_hexagon_S2_mask :
+Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_mask">;
+
+// V60 HVX Instructions.
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_or,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_or
def int_hexagon_V6_veqb_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_or">;
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_or_128B
def int_hexagon_V6_veqb_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_xor,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_xor
-def int_hexagon_V6_veqb_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_xor">;
+def int_hexagon_V6_vminub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_veqb_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_veqb_xor_128B
-def int_hexagon_V6_veqb_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
+def int_hexagon_V6_vminub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw,QV_ftype_VIVI,2)
-// tag : V6_vgtuw
-def int_hexagon_V6_vgtuw :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw">;
+def int_hexagon_V6_vaslw_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_128B,QV_ftype_VIVI,2)
-// tag : V6_vgtuw_128B
-def int_hexagon_V6_vgtuw_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
+def int_hexagon_V6_vaslw_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_and
-def int_hexagon_V6_vgtuw_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_and">;
+def int_hexagon_V6_vmpyhvsrs :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_and_128B
-def int_hexagon_V6_vgtuw_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
+def int_hexagon_V6_vmpyhvsrs_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_or
-def int_hexagon_V6_vgtuw_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_or">;
+def int_hexagon_V6_vsathub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsathub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_or_128B
-def int_hexagon_V6_vgtuw_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
+def int_hexagon_V6_vsathub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsathub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_xor
-def int_hexagon_V6_vgtuw_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
+def int_hexagon_V6_vaddh_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuw_xor_128B
-def int_hexagon_V6_vgtuw_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
+def int_hexagon_V6_vaddh_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh,QV_ftype_VIVI,2)
-// tag : V6_vgtuh
-def int_hexagon_V6_vgtuh :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh">;
+def int_hexagon_V6_vrmpybusi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_128B,QV_ftype_VIVI,2)
-// tag : V6_vgtuh_128B
-def int_hexagon_V6_vgtuh_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
+def int_hexagon_V6_vrmpybusi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_128B">;
+
+def int_hexagon_V6_vshufoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoh">;
+
+def int_hexagon_V6_vshufoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
+
+def int_hexagon_V6_vasrwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrwv">;
+
+def int_hexagon_V6_vasrwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
+
+def int_hexagon_V6_vdmpyhsuisat :
+Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
+
+def int_hexagon_V6_vdmpyhsuisat_128B :
+Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
+
+def int_hexagon_V6_vrsadubi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc">;
+
+def int_hexagon_V6_vrsadubi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B">;
+
+def int_hexagon_V6_vnavgw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgw">;
+
+def int_hexagon_V6_vnavgw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
+
+def int_hexagon_V6_vnavgh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgh">;
+
+def int_hexagon_V6_vnavgh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
+
+def int_hexagon_V6_vavgub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgub">;
+
+def int_hexagon_V6_vavgub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgub_128B">;
+
+def int_hexagon_V6_vsubb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubb">;
+
+def int_hexagon_V6_vsubb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_128B">;
+
+def int_hexagon_V6_vgtw_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_and">;
+
+def int_hexagon_V6_vgtw_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
+
+def int_hexagon_V6_vavgubrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgubrnd">;
+
+def int_hexagon_V6_vavgubrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
+
+def int_hexagon_V6_vrmpybusv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv">;
+
+def int_hexagon_V6_vrmpybusv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
+
+def int_hexagon_V6_vsubbnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbnq">;
+
+def int_hexagon_V6_vsubbnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
+
+def int_hexagon_V6_vroundhb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhb">;
+
+def int_hexagon_V6_vroundhb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
+
+def int_hexagon_V6_vadduhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
+
+def int_hexagon_V6_vadduhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
+
+def int_hexagon_V6_vsububsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububsat">;
+
+def int_hexagon_V6_vsububsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
+
+def int_hexagon_V6_vmpabus_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
+
+def int_hexagon_V6_vmpabus_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
+
+def int_hexagon_V6_vmux :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vmux">;
+
+def int_hexagon_V6_vmux_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vmux_128B">;
+
+def int_hexagon_V6_vmpyhus :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus">;
+
+def int_hexagon_V6_vmpyhus_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
+
+def int_hexagon_V6_vpackeb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeb">;
+
+def int_hexagon_V6_vpackeb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
+
+def int_hexagon_V6_vsubhnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhnq">;
+
+def int_hexagon_V6_vsubhnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
+
+def int_hexagon_V6_vavghrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavghrnd">;
+
+def int_hexagon_V6_vavghrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
+
+def int_hexagon_V6_vtran2x2_map :
+Hexagon_v16i32v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vtran2x2_map">;
+
+def int_hexagon_V6_vtran2x2_map_128B :
+Hexagon_v32i32v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtran2x2_map_128B">;
+
+def int_hexagon_V6_vdelta :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdelta">;
+
+def int_hexagon_V6_vdelta_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdelta_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_and
def int_hexagon_V6_vgtuh_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_and">;
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_and_128B
def int_hexagon_V6_vgtuh_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_or
-def int_hexagon_V6_vgtuh_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_or">;
+def int_hexagon_V6_vtmpyhb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_or_128B
-def int_hexagon_V6_vgtuh_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
+def int_hexagon_V6_vtmpyhb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_xor
-def int_hexagon_V6_vgtuh_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
+def int_hexagon_V6_vpackob :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackob">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtuh_xor_128B
-def int_hexagon_V6_vgtuh_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
+def int_hexagon_V6_vpackob_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackob_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub,QV_ftype_VIVI,2)
-// tag : V6_vgtub
-def int_hexagon_V6_vgtub :
-Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtub">;
+def int_hexagon_V6_vmaxh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_128B,QV_ftype_VIVI,2)
-// tag : V6_vgtub_128B
-def int_hexagon_V6_vgtub_128B :
-Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_128B">;
+def int_hexagon_V6_vmaxh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_and,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_and
-def int_hexagon_V6_vgtub_and :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_and">;
+def int_hexagon_V6_vtmpybus_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_and_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_and_128B
-def int_hexagon_V6_vgtub_and_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
+def int_hexagon_V6_vtmpybus_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_or,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_or
-def int_hexagon_V6_vgtub_or :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_or">;
+def int_hexagon_V6_vsubuhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_or_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_or_128B
-def int_hexagon_V6_vgtub_or_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
+def int_hexagon_V6_vsubuhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_xor,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_xor
-def int_hexagon_V6_vgtub_xor :
-Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_xor">;
+def int_hexagon_V6_vasrw_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vgtub_xor_128B,QV_ftype_QVVIVI,3)
-// tag : V6_vgtub_xor_128B
-def int_hexagon_V6_vgtub_xor_128B :
-Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
+def int_hexagon_V6_vasrw_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_or,QV_ftype_QVQV,2)
-// tag : V6_pred_or
def int_hexagon_V6_pred_or :
-Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or">;
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_or_128B,QV_ftype_QVQV,2)
-// tag : V6_pred_or_128B
def int_hexagon_V6_pred_or_128B :
-Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_128B">;
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_or_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_and,QV_ftype_QVQV,2)
-// tag : V6_pred_and
-def int_hexagon_V6_pred_and :
-Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and">;
+def int_hexagon_V6_vrmpyub_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_and_128B,QV_ftype_QVQV,2)
-// tag : V6_pred_and_128B
-def int_hexagon_V6_pred_and_128B :
-Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_128B">;
+def int_hexagon_V6_vrmpyub_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_not,QV_ftype_QV,1)
-// tag : V6_pred_not
-def int_hexagon_V6_pred_not :
-Hexagon_v64iv64i_Intrinsic<"HEXAGON_V6_pred_not">;
+def int_hexagon_V6_lo :
+Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_lo">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_not_128B,QV_ftype_QV,1)
-// tag : V6_pred_not_128B
-def int_hexagon_V6_pred_not_128B :
-Hexagon_v128iv128i_Intrinsic<"HEXAGON_V6_pred_not_128B">;
+def int_hexagon_V6_lo_128B :
+Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_lo_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_xor,QV_ftype_QVQV,2)
-// tag : V6_pred_xor
-def int_hexagon_V6_pred_xor :
-Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_xor">;
+def int_hexagon_V6_vsubb_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_xor_128B,QV_ftype_QVQV,2)
-// tag : V6_pred_xor_128B
-def int_hexagon_V6_pred_xor_128B :
-Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
+def int_hexagon_V6_vsubb_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
+
+def int_hexagon_V6_vsubhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
+
+def int_hexagon_V6_vsubhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
+
+def int_hexagon_V6_vmpyiwh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh">;
+
+def int_hexagon_V6_vmpyiwh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
+
+def int_hexagon_V6_vmpyiwb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb">;
+
+def int_hexagon_V6_vmpyiwb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
+
+def int_hexagon_V6_ldu0 :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldu0">;
+
+def int_hexagon_V6_ldu0_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ldu0_128B">;
+
+def int_hexagon_V6_vgtuh_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
+
+def int_hexagon_V6_vgtuh_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
+
+def int_hexagon_V6_vgth_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_or">;
+
+def int_hexagon_V6_vgth_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
+
+def int_hexagon_V6_vavgh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgh">;
+
+def int_hexagon_V6_vavgh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgh_128B">;
+
+def int_hexagon_V6_vlalignb :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignb">;
+
+def int_hexagon_V6_vlalignb_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
+
+def int_hexagon_V6_vsh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsh">;
+
+def int_hexagon_V6_vsh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_and_n,QV_ftype_QVQV,2)
-// tag : V6_pred_and_n
def int_hexagon_V6_pred_and_n :
-Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and_n">;
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_and_n">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_and_n_128B,QV_ftype_QVQV,2)
-// tag : V6_pred_and_n_128B
def int_hexagon_V6_pred_and_n_128B :
-Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_or_n,QV_ftype_QVQV,2)
-// tag : V6_pred_or_n
-def int_hexagon_V6_pred_or_n :
-Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or_n">;
+def int_hexagon_V6_vsb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_or_n_128B,QV_ftype_QVQV,2)
-// tag : V6_pred_or_n_128B
-def int_hexagon_V6_pred_or_n_128B :
-Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
+def int_hexagon_V6_vsb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_scalar2,QV_ftype_SI,1)
-// tag : V6_pred_scalar2
-def int_hexagon_V6_pred_scalar2 :
-Hexagon_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2">;
+def int_hexagon_V6_vroundwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_scalar2_128B,QV_ftype_SI,1)
-// tag : V6_pred_scalar2_128B
-def int_hexagon_V6_pred_scalar2_128B :
-Hexagon_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
+def int_hexagon_V6_vroundwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmux,VI_ftype_QVVIVI,3)
-// tag : V6_vmux
-def int_hexagon_V6_vmux :
-Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vmux">;
+def int_hexagon_V6_vasrhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrhv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmux_128B,VI_ftype_QVVIVI,3)
-// tag : V6_vmux_128B
-def int_hexagon_V6_vmux_128B :
-Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vmux_128B">;
+def int_hexagon_V6_vasrhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vswap,VD_ftype_QVVIVI,3)
-// tag : V6_vswap
-def int_hexagon_V6_vswap :
-Hexagon_v1024v64iv512v512_Intrinsic<"HEXAGON_V6_vswap">;
+def int_hexagon_V6_vshuffh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vswap_128B,VD_ftype_QVVIVI,3)
-// tag : V6_vswap_128B
-def int_hexagon_V6_vswap_128B :
-Hexagon_v2048v128iv1024v1024_Intrinsic<"HEXAGON_V6_vswap_128B">;
+def int_hexagon_V6_vshuffh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
+
+def int_hexagon_V6_vaddhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
+
+def int_hexagon_V6_vaddhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
+
+def int_hexagon_V6_vnavgub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgub">;
+
+def int_hexagon_V6_vnavgub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
+
+def int_hexagon_V6_vrmpybv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv">;
+
+def int_hexagon_V6_vrmpybv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
+
+def int_hexagon_V6_vnormamth :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamth">;
+
+def int_hexagon_V6_vnormamth_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
+
+def int_hexagon_V6_vdmpyhb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb">;
+
+def int_hexagon_V6_vdmpyhb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
+
+def int_hexagon_V6_vavguh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguh">;
+
+def int_hexagon_V6_vavguh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguh_128B">;
+
+def int_hexagon_V6_vlsrwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrwv">;
+
+def int_hexagon_V6_vlsrwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
+
+def int_hexagon_V6_vlsrhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrhv">;
+
+def int_hexagon_V6_vlsrhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
+
+def int_hexagon_V6_vdmpyhisat :
+Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
+
+def int_hexagon_V6_vdmpyhisat_128B :
+Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
+
+def int_hexagon_V6_vdmpyhvsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
+
+def int_hexagon_V6_vdmpyhvsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
+
+def int_hexagon_V6_vaddw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddw">;
+
+def int_hexagon_V6_vaddw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_128B">;
+
+def int_hexagon_V6_vzh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzh">;
+
+def int_hexagon_V6_vzh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzh_128B">;
+
+def int_hexagon_V6_vaddh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddh">;
+
+def int_hexagon_V6_vaddh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxub,VI_ftype_VIVI,2)
-// tag : V6_vmaxub
def int_hexagon_V6_vmaxub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxub">;
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxub_128B,VI_ftype_VIVI,2)
-// tag : V6_vmaxub_128B
def int_hexagon_V6_vmaxub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminub,VI_ftype_VIVI,2)
-// tag : V6_vminub
-def int_hexagon_V6_vminub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminub">;
+def int_hexagon_V6_vmpyhv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminub_128B,VI_ftype_VIVI,2)
-// tag : V6_vminub_128B
-def int_hexagon_V6_vminub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminub_128B">;
+def int_hexagon_V6_vmpyhv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxuh,VI_ftype_VIVI,2)
-// tag : V6_vmaxuh
-def int_hexagon_V6_vmaxuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxuh">;
+def int_hexagon_V6_vadduhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmaxuh_128B
-def int_hexagon_V6_vmaxuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
+def int_hexagon_V6_vadduhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
+
+def int_hexagon_V6_vshufoeh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeh">;
+
+def int_hexagon_V6_vshufoeh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
+
+def int_hexagon_V6_vmpyuhv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
+
+def int_hexagon_V6_vmpyuhv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
+
+def int_hexagon_V6_veqh :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh">;
+
+def int_hexagon_V6_veqh_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_128B">;
+
+def int_hexagon_V6_vmpabuuv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabuuv">;
+
+def int_hexagon_V6_vmpabuuv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
+
+def int_hexagon_V6_vasrwhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat">;
+
+def int_hexagon_V6_vasrwhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminuh,VI_ftype_VIVI,2)
-// tag : V6_vminuh
def int_hexagon_V6_vminuh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminuh">;
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vminuh_128B
def int_hexagon_V6_vminuh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminuh_128B">;
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxh,VI_ftype_VIVI,2)
-// tag : V6_vmaxh
-def int_hexagon_V6_vmaxh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxh">;
+def int_hexagon_V6_vror :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vror">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxh_128B,VI_ftype_VIVI,2)
-// tag : V6_vmaxh_128B
-def int_hexagon_V6_vmaxh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
+def int_hexagon_V6_vror_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vror_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminh,VI_ftype_VIVI,2)
-// tag : V6_vminh
-def int_hexagon_V6_vminh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminh">;
+def int_hexagon_V6_vmpyowh_rnd_sacc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminh_128B,VI_ftype_VIVI,2)
-// tag : V6_vminh_128B
-def int_hexagon_V6_vminh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminh_128B">;
+def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxw,VI_ftype_VIVI,2)
-// tag : V6_vmaxw
-def int_hexagon_V6_vmaxw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxw">;
+def int_hexagon_V6_vmaxuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxw_128B,VI_ftype_VIVI,2)
-// tag : V6_vmaxw_128B
-def int_hexagon_V6_vmaxw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
+def int_hexagon_V6_vmaxuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminw,VI_ftype_VIVI,2)
-// tag : V6_vminw
-def int_hexagon_V6_vminw :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminw">;
+def int_hexagon_V6_vabsh_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminw_128B,VI_ftype_VIVI,2)
-// tag : V6_vminw_128B
-def int_hexagon_V6_vminw_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminw_128B">;
+def int_hexagon_V6_vabsh_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsathub,VI_ftype_VIVI,2)
-// tag : V6_vsathub
-def int_hexagon_V6_vsathub :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsathub">;
+def int_hexagon_V6_pred_or_n :
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_or_n">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsathub_128B,VI_ftype_VIVI,2)
-// tag : V6_vsathub_128B
-def int_hexagon_V6_vsathub_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsathub_128B">;
+def int_hexagon_V6_pred_or_n_128B :
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsatwh,VI_ftype_VIVI,2)
-// tag : V6_vsatwh
-def int_hexagon_V6_vsatwh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsatwh">;
+def int_hexagon_V6_vdealb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsatwh_128B,VI_ftype_VIVI,2)
-// tag : V6_vsatwh_128B
-def int_hexagon_V6_vsatwh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
+def int_hexagon_V6_vdealb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffeb,VI_ftype_VIVI,2)
-// tag : V6_vshuffeb
-def int_hexagon_V6_vshuffeb :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffeb">;
+def int_hexagon_V6_vmpybusv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffeb_128B,VI_ftype_VIVI,2)
-// tag : V6_vshuffeb_128B
-def int_hexagon_V6_vshuffeb_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
+def int_hexagon_V6_vmpybusv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffob,VI_ftype_VIVI,2)
-// tag : V6_vshuffob
-def int_hexagon_V6_vshuffob :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffob">;
+def int_hexagon_V6_vzb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffob_128B,VI_ftype_VIVI,2)
-// tag : V6_vshuffob_128B
-def int_hexagon_V6_vshuffob_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
+def int_hexagon_V6_vzb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufeh,VI_ftype_VIVI,2)
-// tag : V6_vshufeh
-def int_hexagon_V6_vshufeh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufeh">;
+def int_hexagon_V6_vdmpybus_dv :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufeh_128B,VI_ftype_VIVI,2)
-// tag : V6_vshufeh_128B
-def int_hexagon_V6_vshufeh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
+def int_hexagon_V6_vdmpybus_dv_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoh,VI_ftype_VIVI,2)
-// tag : V6_vshufoh
-def int_hexagon_V6_vshufoh :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufoh">;
+def int_hexagon_V6_vaddbq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoh_128B,VI_ftype_VIVI,2)
-// tag : V6_vshufoh_128B
-def int_hexagon_V6_vshufoh_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
+def int_hexagon_V6_vaddbq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffvdd,VD_ftype_VIVISI,3)
-// tag : V6_vshuffvdd
-def int_hexagon_V6_vshuffvdd :
-Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vshuffvdd">;
+def int_hexagon_V6_vaddb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffvdd_128B,VD_ftype_VIVISI,3)
-// tag : V6_vshuffvdd_128B
-def int_hexagon_V6_vshuffvdd_128B :
-Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
+def int_hexagon_V6_vaddb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealvdd,VD_ftype_VIVISI,3)
-// tag : V6_vdealvdd
-def int_hexagon_V6_vdealvdd :
-Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vdealvdd">;
+def int_hexagon_V6_vaddwq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealvdd_128B,VD_ftype_VIVISI,3)
-// tag : V6_vdealvdd_128B
-def int_hexagon_V6_vdealvdd_128B :
-Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
+def int_hexagon_V6_vaddwq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoeh,VD_ftype_VIVI,2)
-// tag : V6_vshufoeh
-def int_hexagon_V6_vshufoeh :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeh">;
+def int_hexagon_V6_vasrhubrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoeh_128B,VD_ftype_VIVI,2)
-// tag : V6_vshufoeh_128B
-def int_hexagon_V6_vshufoeh_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
+def int_hexagon_V6_vasrhubrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
+
+def int_hexagon_V6_vasrhubsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat">;
+
+def int_hexagon_V6_vasrhubsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoeb,VD_ftype_VIVI,2)
-// tag : V6_vshufoeb
def int_hexagon_V6_vshufoeb :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeb">;
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshufoeb_128B,VD_ftype_VIVI,2)
-// tag : V6_vshufoeb_128B
def int_hexagon_V6_vshufoeb_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealh,VI_ftype_VI,1)
-// tag : V6_vdealh
-def int_hexagon_V6_vdealh :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealh">;
+def int_hexagon_V6_vpackhub_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealh_128B,VI_ftype_VI,1)
-// tag : V6_vdealh_128B
-def int_hexagon_V6_vdealh_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealh_128B">;
+def int_hexagon_V6_vpackhub_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealb,VI_ftype_VI,1)
-// tag : V6_vdealb
-def int_hexagon_V6_vdealb :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealb">;
+def int_hexagon_V6_vmpyiwh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealb_128B,VI_ftype_VI,1)
-// tag : V6_vdealb_128B
-def int_hexagon_V6_vdealb_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealb_128B">;
+def int_hexagon_V6_vmpyiwh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealb4w,VI_ftype_VIVI,2)
-// tag : V6_vdealb4w
-def int_hexagon_V6_vdealb4w :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdealb4w">;
+def int_hexagon_V6_vtmpyb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdealb4w_128B,VI_ftype_VIVI,2)
-// tag : V6_vdealb4w_128B
-def int_hexagon_V6_vdealb4w_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
+def int_hexagon_V6_vtmpyb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffh,VI_ftype_VI,1)
-// tag : V6_vshuffh
-def int_hexagon_V6_vshuffh :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffh">;
+def int_hexagon_V6_vmpabusv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabusv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffh_128B,VI_ftype_VI,1)
-// tag : V6_vshuffh_128B
-def int_hexagon_V6_vshuffh_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
+def int_hexagon_V6_vmpabusv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffb,VI_ftype_VI,1)
-// tag : V6_vshuffb
-def int_hexagon_V6_vshuffb :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffb">;
+def int_hexagon_V6_pred_and :
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vshuffb_128B,VI_ftype_VI,1)
-// tag : V6_vshuffb_128B
-def int_hexagon_V6_vshuffb_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
+def int_hexagon_V6_pred_and_128B :
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_and_128B">;
+
+def int_hexagon_V6_vsubwnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwnq">;
+
+def int_hexagon_V6_vsubwnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
+
+def int_hexagon_V6_vpackwuh_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
+
+def int_hexagon_V6_vpackwuh_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
+
+def int_hexagon_V6_vswap :
+Hexagon_v32i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vswap">;
+
+def int_hexagon_V6_vswap_128B :
+Hexagon_v64i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vswap_128B">;
+
+def int_hexagon_V6_vrmpyubv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
+
+def int_hexagon_V6_vrmpyubv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
+
+def int_hexagon_V6_vgtb_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_and">;
+
+def int_hexagon_V6_vgtb_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
+
+def int_hexagon_V6_vaslw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslw">;
+
+def int_hexagon_V6_vaslw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_128B">;
+
+def int_hexagon_V6_vpackhb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
+
+def int_hexagon_V6_vpackhb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
+
+def int_hexagon_V6_vmpyih_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
+
+def int_hexagon_V6_vmpyih_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
+
+def int_hexagon_V6_vshuffvdd :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd">;
+
+def int_hexagon_V6_vshuffvdd_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
+
+def int_hexagon_V6_vaddb_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_dv">;
+
+def int_hexagon_V6_vaddb_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
+
+def int_hexagon_V6_vunpackub :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackub">;
+
+def int_hexagon_V6_vunpackub_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
+
+def int_hexagon_V6_vgtuw :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw">;
+
+def int_hexagon_V6_vgtuw_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
+
+def int_hexagon_V6_vlutvwh :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh">;
+
+def int_hexagon_V6_vlutvwh_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
+
+def int_hexagon_V6_vgtub :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub">;
+
+def int_hexagon_V6_vgtub_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_128B">;
+
+def int_hexagon_V6_vmpyowh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh">;
+
+def int_hexagon_V6_vmpyowh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
+
+def int_hexagon_V6_vmpyieoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyieoh">;
+
+def int_hexagon_V6_vmpyieoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_extractw,SI_ftype_VISI,2)
-// tag : V6_extractw
def int_hexagon_V6_extractw :
-Hexagon_iv512i_Intrinsic<"HEXAGON_V6_extractw">;
+Hexagon_i32_v16i32i32_Intrinsic<"HEXAGON_V6_extractw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_extractw_128B,SI_ftype_VISI,2)
-// tag : V6_extractw_128B
def int_hexagon_V6_extractw_128B :
-Hexagon_iv1024i_Intrinsic<"HEXAGON_V6_extractw_128B">;
+Hexagon_i32_v32i32i32_Intrinsic<"HEXAGON_V6_extractw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vinsertwr,VI_ftype_VISI,2)
-// tag : V6_vinsertwr
-def int_hexagon_V6_vinsertwr :
-Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vinsertwr">;
+def int_hexagon_V6_vavgwrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgwrnd">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vinsertwr_128B,VI_ftype_VISI,2)
-// tag : V6_vinsertwr_128B
-def int_hexagon_V6_vinsertwr_128B :
-Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
+def int_hexagon_V6_vavgwrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplatw,VI_ftype_SI,1)
-// tag : V6_lvsplatw
-def int_hexagon_V6_lvsplatw :
-Hexagon_v512i_Intrinsic<"HEXAGON_V6_lvsplatw">;
+def int_hexagon_V6_vdmpyhsat_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplatw_128B,VI_ftype_SI,1)
-// tag : V6_lvsplatw_128B
-def int_hexagon_V6_lvsplatw_128B :
-Hexagon_v1024i_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
+def int_hexagon_V6_vdmpyhsat_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vassign,VI_ftype_VI,1)
-// tag : V6_vassign
-def int_hexagon_V6_vassign :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vassign">;
+def int_hexagon_V6_vgtub_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vassign_128B,VI_ftype_VI,1)
-// tag : V6_vassign_128B
-def int_hexagon_V6_vassign_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vassign_128B">;
+def int_hexagon_V6_vgtub_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
+
+def int_hexagon_V6_vmpyub :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub">;
+
+def int_hexagon_V6_vmpyub_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
+
+def int_hexagon_V6_vmpyuh :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh">;
+
+def int_hexagon_V6_vmpyuh_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
+
+def int_hexagon_V6_vunpackob :
+Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackob">;
+
+def int_hexagon_V6_vunpackob_128B :
+Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
+
+def int_hexagon_V6_vmpahb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb">;
+
+def int_hexagon_V6_vmpahb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
+
+def int_hexagon_V6_veqw_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_or">;
+
+def int_hexagon_V6_veqw_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
+
+def int_hexagon_V6_vandqrt :
+Hexagon_v16i32_v512i1i32_Intrinsic<"HEXAGON_V6_vandqrt">;
+
+def int_hexagon_V6_vandqrt_128B :
+Hexagon_v32i32_v1024i1i32_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
+
+def int_hexagon_V6_vxor :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vxor">;
+
+def int_hexagon_V6_vxor_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vxor_128B">;
+
+def int_hexagon_V6_vasrwhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
+
+def int_hexagon_V6_vasrwhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
+
+def int_hexagon_V6_vmpyhsat_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
+
+def int_hexagon_V6_vmpyhsat_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
+
+def int_hexagon_V6_vrmpybus_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
+
+def int_hexagon_V6_vrmpybus_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
+
+def int_hexagon_V6_vsubhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhw">;
+
+def int_hexagon_V6_vsubhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
+
+def int_hexagon_V6_vdealb4w :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdealb4w">;
+
+def int_hexagon_V6_vdealb4w_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
+
+def int_hexagon_V6_vmpyowh_sacc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
+
+def int_hexagon_V6_vmpyowh_sacc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
+
+def int_hexagon_V6_vmpybv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv">;
+
+def int_hexagon_V6_vmpybv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
+
+def int_hexagon_V6_vabsdiffh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffh">;
+
+def int_hexagon_V6_vabsdiffh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
+
+def int_hexagon_V6_vshuffob :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffob">;
+
+def int_hexagon_V6_vshuffob_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
+
+def int_hexagon_V6_vmpyub_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
+
+def int_hexagon_V6_vmpyub_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
+
+def int_hexagon_V6_vnormamtw :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamtw">;
+
+def int_hexagon_V6_vnormamtw_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
+
+def int_hexagon_V6_vunpackuh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackuh">;
+
+def int_hexagon_V6_vunpackuh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
+
+def int_hexagon_V6_vgtuh_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_or">;
+
+def int_hexagon_V6_vgtuh_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
+
+def int_hexagon_V6_vmpyiewuh_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
+
+def int_hexagon_V6_vmpyiewuh_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
+
+def int_hexagon_V6_vunpackoh :
+Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackoh">;
+
+def int_hexagon_V6_vunpackoh_128B :
+Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
+
+def int_hexagon_V6_vdmpyhsat :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
+
+def int_hexagon_V6_vdmpyhsat_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
+
+def int_hexagon_V6_vmpyubv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv">;
+
+def int_hexagon_V6_vmpyubv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
+
+def int_hexagon_V6_vmpyhss :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhss">;
+
+def int_hexagon_V6_vmpyhss_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
+
+def int_hexagon_V6_hi :
+Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_hi">;
+
+def int_hexagon_V6_hi_128B :
+Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_hi_128B">;
+
+def int_hexagon_V6_vasrwuhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
+
+def int_hexagon_V6_vasrwuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
+
+def int_hexagon_V6_veqw :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw">;
+
+def int_hexagon_V6_veqw_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_128B">;
+
+def int_hexagon_V6_vdsaduh :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh">;
+
+def int_hexagon_V6_vdsaduh_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
+
+def int_hexagon_V6_vsubw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubw">;
+
+def int_hexagon_V6_vsubw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_128B">;
+
+def int_hexagon_V6_vsubw_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_dv">;
+
+def int_hexagon_V6_vsubw_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
+
+def int_hexagon_V6_veqb_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_and">;
+
+def int_hexagon_V6_veqb_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
+
+def int_hexagon_V6_vmpyih :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih">;
+
+def int_hexagon_V6_vmpyih_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
+
+def int_hexagon_V6_vtmpyb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
+
+def int_hexagon_V6_vtmpyb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
+
+def int_hexagon_V6_vrmpybus :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus">;
+
+def int_hexagon_V6_vrmpybus_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
+
+def int_hexagon_V6_vmpybus_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
+
+def int_hexagon_V6_vmpybus_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
+
+def int_hexagon_V6_vgth_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_xor">;
+
+def int_hexagon_V6_vgth_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
+
+def int_hexagon_V6_vsubhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhsat">;
+
+def int_hexagon_V6_vsubhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
+
+def int_hexagon_V6_vrmpyubi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc">;
+
+def int_hexagon_V6_vrmpyubi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B">;
+
+def int_hexagon_V6_vabsw :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw">;
+
+def int_hexagon_V6_vabsw_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_128B">;
+
+def int_hexagon_V6_vaddwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
+
+def int_hexagon_V6_vaddwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
+
+def int_hexagon_V6_vlsrw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrw">;
+
+def int_hexagon_V6_vlsrw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
+
+def int_hexagon_V6_vabsh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh">;
+
+def int_hexagon_V6_vabsh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_128B">;
+
+def int_hexagon_V6_vlsrh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrh">;
+
+def int_hexagon_V6_vlsrh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
+
+def int_hexagon_V6_valignb :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignb">;
+
+def int_hexagon_V6_valignb_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignb_128B">;
+
+def int_hexagon_V6_vsubhq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhq">;
+
+def int_hexagon_V6_vsubhq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
+
+def int_hexagon_V6_vpackoh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackoh">;
+
+def int_hexagon_V6_vpackoh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
+
+def int_hexagon_V6_vdmpybus_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
+
+def int_hexagon_V6_vdmpybus_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
+
+def int_hexagon_V6_vdmpyhvsat_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
+
+def int_hexagon_V6_vdmpyhvsat_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
+
+def int_hexagon_V6_vrmpybv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
+
+def int_hexagon_V6_vrmpybv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
+
+def int_hexagon_V6_vaddhsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhsat">;
+
+def int_hexagon_V6_vaddhsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcombine,VD_ftype_VIVI,2)
-// tag : V6_vcombine
def int_hexagon_V6_vcombine :
-Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vcombine">;
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcombine">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcombine_128B,VD_ftype_VIVI,2)
-// tag : V6_vcombine_128B
def int_hexagon_V6_vcombine_128B :
-Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vcombine_128B">;
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcombine_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdelta,VI_ftype_VIVI,2)
-// tag : V6_vdelta
-def int_hexagon_V6_vdelta :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdelta">;
+def int_hexagon_V6_vandqrt_acc :
+Hexagon_v16i32_v16i32v512i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdelta_128B,VI_ftype_VIVI,2)
-// tag : V6_vdelta_128B
-def int_hexagon_V6_vdelta_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdelta_128B">;
+def int_hexagon_V6_vandqrt_acc_128B :
+Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrdelta,VI_ftype_VIVI,2)
-// tag : V6_vrdelta
-def int_hexagon_V6_vrdelta :
-Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrdelta">;
+def int_hexagon_V6_vaslhv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslhv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrdelta_128B,VI_ftype_VIVI,2)
-// tag : V6_vrdelta_128B
-def int_hexagon_V6_vrdelta_128B :
-Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
+def int_hexagon_V6_vaslhv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcl0w,VI_ftype_VI,1)
-// tag : V6_vcl0w
-def int_hexagon_V6_vcl0w :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0w">;
+def int_hexagon_V6_vinsertwr :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vinsertwr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcl0w_128B,VI_ftype_VI,1)
-// tag : V6_vcl0w_128B
-def int_hexagon_V6_vcl0w_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
+def int_hexagon_V6_vinsertwr_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
+
+def int_hexagon_V6_vsubh_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_dv">;
+
+def int_hexagon_V6_vsubh_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
+
+def int_hexagon_V6_vshuffb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffb">;
+
+def int_hexagon_V6_vshuffb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
+
+def int_hexagon_V6_vand :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vand">;
+
+def int_hexagon_V6_vand_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vand_128B">;
+
+def int_hexagon_V6_vmpyhv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv">;
+
+def int_hexagon_V6_vmpyhv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
+
+def int_hexagon_V6_vdmpyhsuisat_acc :
+Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
+
+def int_hexagon_V6_vdmpyhsuisat_acc_128B :
+Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
+
+def int_hexagon_V6_vsububsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
+
+def int_hexagon_V6_vsububsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
+
+def int_hexagon_V6_vgtb_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_xor">;
+
+def int_hexagon_V6_vgtb_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
+
+def int_hexagon_V6_vdsaduh_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
+
+def int_hexagon_V6_vdsaduh_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
+
+def int_hexagon_V6_vrmpyub :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub">;
+
+def int_hexagon_V6_vrmpyub_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
+
+def int_hexagon_V6_vmpyuh_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
+
+def int_hexagon_V6_vmpyuh_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcl0h,VI_ftype_VI,1)
-// tag : V6_vcl0h
def int_hexagon_V6_vcl0h :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0h">;
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0h">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vcl0h_128B,VI_ftype_VI,1)
-// tag : V6_vcl0h_128B
def int_hexagon_V6_vcl0h_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnormamtw,VI_ftype_VI,1)
-// tag : V6_vnormamtw
-def int_hexagon_V6_vnormamtw :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamtw">;
+def int_hexagon_V6_vmpyhus_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnormamtw_128B,VI_ftype_VI,1)
-// tag : V6_vnormamtw_128B
-def int_hexagon_V6_vnormamtw_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
+def int_hexagon_V6_vmpyhus_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnormamth,VI_ftype_VI,1)
-// tag : V6_vnormamth
-def int_hexagon_V6_vnormamth :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamth">;
+def int_hexagon_V6_vmpybv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnormamth_128B,VI_ftype_VI,1)
-// tag : V6_vnormamth_128B
-def int_hexagon_V6_vnormamth_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
+def int_hexagon_V6_vmpybv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
+
+def int_hexagon_V6_vrsadubi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi">;
+
+def int_hexagon_V6_vrsadubi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_128B">;
+
+def int_hexagon_V6_vdmpyhb_dv_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
+
+def int_hexagon_V6_vdmpyhb_dv_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
+
+def int_hexagon_V6_vshufeh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufeh">;
+
+def int_hexagon_V6_vshufeh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
+
+def int_hexagon_V6_vmpyewuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh">;
+
+def int_hexagon_V6_vmpyewuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
+
+def int_hexagon_V6_vmpyhsrs :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
+
+def int_hexagon_V6_vmpyhsrs_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
+
+def int_hexagon_V6_vdmpybus_dv_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
+
+def int_hexagon_V6_vdmpybus_dv_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
+
+def int_hexagon_V6_vaddubh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh">;
+
+def int_hexagon_V6_vaddubh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
+
+def int_hexagon_V6_vasrwh :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwh">;
+
+def int_hexagon_V6_vasrwh_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
+
+def int_hexagon_V6_ld0 :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ld0">;
+
+def int_hexagon_V6_ld0_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ld0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpopcounth,VI_ftype_VI,1)
-// tag : V6_vpopcounth
def int_hexagon_V6_vpopcounth :
-Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vpopcounth">;
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vpopcounth">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vpopcounth_128B,VI_ftype_VI,1)
-// tag : V6_vpopcounth_128B
def int_hexagon_V6_vpopcounth_128B :
-Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
+
+def int_hexagon_V6_ldnt0 :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_ldnt0">;
+
+def int_hexagon_V6_ldnt0_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_ldnt0_128B">;
+
+def int_hexagon_V6_vgth_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_and">;
+
+def int_hexagon_V6_vgth_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
+
+def int_hexagon_V6_vaddubsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
+
+def int_hexagon_V6_vaddubsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
+
+def int_hexagon_V6_vpackeh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeh">;
+
+def int_hexagon_V6_vpackeh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
+
+def int_hexagon_V6_vmpyh :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh">;
+
+def int_hexagon_V6_vmpyh_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
+
+def int_hexagon_V6_vminh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminh">;
+
+def int_hexagon_V6_vminh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminh_128B">;
+
+def int_hexagon_V6_pred_scalar2 :
+Hexagon_v512i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2">;
+
+def int_hexagon_V6_pred_scalar2_128B :
+Hexagon_v1024i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
+
+def int_hexagon_V6_vdealh :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealh">;
+
+def int_hexagon_V6_vdealh_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealh_128B">;
+
+def int_hexagon_V6_vpackwh_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
+
+def int_hexagon_V6_vpackwh_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
+
+def int_hexagon_V6_vaslh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslh">;
+
+def int_hexagon_V6_vaslh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_128B">;
+
+def int_hexagon_V6_vgtuw_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_and">;
+
+def int_hexagon_V6_vgtuw_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
+
+def int_hexagon_V6_vor :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vor">;
+
+def int_hexagon_V6_vor_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vor_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvb
def int_hexagon_V6_vlutvvb :
-Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb">;
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_128B,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvb_128B
def int_hexagon_V6_vlutvvb_128B :
-Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
+
+def int_hexagon_V6_vmpyiowh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiowh">;
+
+def int_hexagon_V6_vmpyiowh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc,VI_ftype_VIVIVISI,4)
-// tag : V6_vlutvvb_oracc
def int_hexagon_V6_vlutvvb_oracc :
-Hexagon_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
+Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc_128B,VI_ftype_VIVIVISI,4)
-// tag : V6_vlutvvb_oracc_128B
def int_hexagon_V6_vlutvvb_oracc_128B :
-Hexagon_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
+Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwh
-def int_hexagon_V6_vlutvwh :
-Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh">;
+def int_hexagon_V6_vandvrt :
+Hexagon_v512i1_v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_128B,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwh_128B
-def int_hexagon_V6_vlutvwh_128B :
-Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
+def int_hexagon_V6_vandvrt_128B :
+Hexagon_v1024i1_v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc,VD_ftype_VDVIVISI,4)
-// tag : V6_vlutvwh_oracc
-def int_hexagon_V6_vlutvwh_oracc :
-Hexagon_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
+def int_hexagon_V6_veqh_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc_128B,VD_ftype_VDVIVISI,4)
-// tag : V6_vlutvwh_oracc_128B
-def int_hexagon_V6_vlutvwh_oracc_128B :
-Hexagon_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
+def int_hexagon_V6_veqh_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
-//
-// Masked vector stores
-//
-def int_hexagon_V6_vS32b_qpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai">;
+def int_hexagon_V6_vadduhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw">;
-def int_hexagon_V6_vS32b_nqpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai">;
+def int_hexagon_V6_vadduhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
-def int_hexagon_V6_vS32b_nt_qpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai">;
+def int_hexagon_V6_vcl0w :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0w">;
-def int_hexagon_V6_vS32b_nt_nqpred_ai :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai">;
+def int_hexagon_V6_vcl0w_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
-def int_hexagon_V6_vS32b_qpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B">;
+def int_hexagon_V6_vmpyihb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb">;
-def int_hexagon_V6_vS32b_nqpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B">;
+def int_hexagon_V6_vmpyihb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
-def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B">;
+def int_hexagon_V6_vtmpybus :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus">;
-def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B">;
+def int_hexagon_V6_vtmpybus_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
-def int_hexagon_V6_vmaskedstoreq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstoreq">;
+def int_hexagon_V6_vd0 :
+Hexagon_v16i32__Intrinsic<"HEXAGON_V6_vd0">;
-def int_hexagon_V6_vmaskedstorenq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorenq">;
+def int_hexagon_V6_vd0_128B :
+Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vd0_128B">;
-def int_hexagon_V6_vmaskedstorentq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentq">;
+def int_hexagon_V6_veqh_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_or">;
-def int_hexagon_V6_vmaskedstorentnq :
-Hexagon_vv64ivmemv512_Intrinsic<"HEXAGON_V6_vmaskedstorentnq">;
+def int_hexagon_V6_veqh_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
-def int_hexagon_V6_vmaskedstoreq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstoreq_128B">;
+def int_hexagon_V6_vgtw_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_or">;
-def int_hexagon_V6_vmaskedstorenq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorenq_128B">;
+def int_hexagon_V6_vgtw_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
-def int_hexagon_V6_vmaskedstorentq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentq_128B">;
+def int_hexagon_V6_vdmpybus :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus">;
-def int_hexagon_V6_vmaskedstorentnq_128B :
-Hexagon_vv128ivmemv1024_Intrinsic<"HEXAGON_V6_vmaskedstorentnq_128B">;
+def int_hexagon_V6_vdmpybus_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
-multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
- def NAME#_pci : Hexagon_NonGCC_Intrinsic<
- [ElTy, llvm_ptr_ty],
- [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<3>]>;
- def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
- [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<2>]>;
-}
+def int_hexagon_V6_vgtub_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_or">;
-defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_L2_loadrb : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_L2_loadruh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_L2_loadrh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_L2_loadri : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_L2_loadrd : Hexagon_custom_circ_ld_Intrinsic<llvm_i64_ty>;
+def int_hexagon_V6_vgtub_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
-multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
- def NAME#_pci : Hexagon_NonGCC_Intrinsic<
- [llvm_ptr_ty],
- [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<4>]>;
- def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
- [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
- [IntrArgMemOnly, NoCapture<3>]>;
-}
+def int_hexagon_V6_vmpybus :
+Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus">;
-defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_S2_storerh : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
-defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;
+def int_hexagon_V6_vmpybus_128B :
+Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
-// The front-end emits the intrinsic call with only two arguments. The third
-// argument from the builtin is already used by front-end to write to memory
-// by generating a store.
-class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
- : Hexagon_NonGCC_Intrinsic<
- [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+def int_hexagon_V6_vdmpyhb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
-def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
-def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
-def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
-def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
-def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
-def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
+def int_hexagon_V6_vdmpyhb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
-def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
-def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
-def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
-def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
-def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
+def int_hexagon_V6_vandvrt_acc :
+Hexagon_v512i1_v512i1v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
+def int_hexagon_V6_vandvrt_acc_128B :
+Hexagon_v1024i1_v1024i1v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
-///
-/// HexagonV62 intrinsics
-///
+def int_hexagon_V6_vassign :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign">;
-//
-// Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
-// tag : M6_vabsdiffb
-class Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vassign_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_128B">;
-//
-// Hexagon_LLii_Intrinsic<string GCCIntSuffix>
-// tag : S6_vsplatrbp
-class Hexagon_LLii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i64_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddwnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwnq">;
-//
-// Hexagon_V62_v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlsrb
-class Hexagon_V62_v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddwnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
-//
-// Hexagon_V62_v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlsrb_128B
-class Hexagon_V62_v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vgtub_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_and">;
-//
-// Hexagon_V62_v512v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vasrwuhrndsat
-class Hexagon_V62_v512v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vgtub_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
-//
-// Hexagon_V62_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vasrwuhrndsat_128B
-class Hexagon_V62_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vdmpyhb_dv :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
-//
-// Hexagon_V62_v512v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrounduwuh
-class Hexagon_V62_v512v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vdmpyhb_dv_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
-//
-// Hexagon_V62_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrounduwuh_128B
-class Hexagon_V62_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vunpackb :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackb">;
-//
-// Hexagon_V62_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
-// tag : V6_vadduwsat_dv_128B
-class Hexagon_V62_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vunpackb_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
-//
-// Hexagon_V62_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddhw_acc
-class Hexagon_V62_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vunpackh :
+Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackh">;
-//
-// Hexagon_V62_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vaddhw_acc_128B
-class Hexagon_V62_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vunpackh_128B :
+Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
-//
-// Hexagon_V62_v1024v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyewuh_64
-class Hexagon_V62_v1024v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpahb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
-//
-// Hexagon_V62_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyewuh_64_128B
-class Hexagon_V62_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpahb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
-//
-// Hexagon_V62_v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpauhb_128B
-class Hexagon_V62_v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddbnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbnq">;
-//
-// Hexagon_V62_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpauhb_acc_128B
-class Hexagon_V62_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddbnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
-//
-// Hexagon_V62_v512v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandnqrt
-class Hexagon_V62_v512v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vlalignbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignbi">;
-//
-// Hexagon_V62_v1024v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandnqrt_128B
-class Hexagon_V62_v1024v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vlalignbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignbi_128B">;
-//
-// Hexagon_V62_v512v512v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandnqrt_acc
-class Hexagon_V62_v512v512v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vsatwh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatwh">;
-//
-// Hexagon_V62_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandnqrt_acc_128B
-class Hexagon_V62_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vsatwh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
-//
-// Hexagon_V62_v512v64iv512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvqv
-class Hexagon_V62_v512v64iv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vgtuh :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh">;
-//
-// Hexagon_V62_v1024v128iv1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vandvqv_128B
-class Hexagon_V62_v1024v128iv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vgtuh_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
-//
-// Hexagon_V62_v64ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_scalar2v2
-class Hexagon_V62_v64ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyihb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
-//
-// Hexagon_V62_v128ii_Intrinsic<string GCCIntSuffix>
-// tag : V6_pred_scalar2v2_128B
-class Hexagon_V62_v128ii_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyihb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
-//
-// Hexagon_V62_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
-// tag : V6_shuffeqw
-class Hexagon_V62_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrmpybusv_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
-//
-// Hexagon_V62_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
-// tag : V6_shuffeqw_128B
-class Hexagon_V62_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrmpybusv_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
-//
-// Hexagon_V62_v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_lvsplath
-class Hexagon_V62_v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrdelta :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrdelta">;
-//
-// Hexagon_V62_v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_lvsplath_128B
-class Hexagon_V62_v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrdelta_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
-//
-// Hexagon_V62_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvvb_oracci
-class Hexagon_V62_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vroundwh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwh">;
-//
-// Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvvb_oracci_128B
-class Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vroundwh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
-//
-// Hexagon_V62_v1024v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvwhi
-class Hexagon_V62_v1024v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddw_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_dv">;
-//
-// Hexagon_V62_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvwhi_128B
-class Hexagon_V62_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddw_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
-//
-// Hexagon_V62_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvwh_oracci
-class Hexagon_V62_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyiwb_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
-//
-// Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlutvwh_oracci_128B
-class Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyiwb_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
-// Hexagon_v512v64iv512v512v64i_Intrinsic<string GCCIntSuffix>
-// tag: V6_vaddcarry
-class Hexagon_v512v64iv512v512v64i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty, llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v512i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vsubbq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbq">;
-// Hexagon_v1024v128iv1024v1024v128i_Intrinsic<string GCCIntSuffix>
-// tag: V6_vaddcarry_128B
-class Hexagon_v1024v128iv1024v1024v128i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty, llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v1024i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vsubbq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
+def int_hexagon_V6_veqh_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_and">;
-//
-// BUILTIN_INFO(HEXAGON.M6_vabsdiffb,DI_ftype_DIDI,2)
-// tag : M6_vabsdiffb
-def int_hexagon_M6_vabsdiffb :
-Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffb">;
+def int_hexagon_V6_veqh_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
-//
-// BUILTIN_INFO(HEXAGON.M6_vabsdiffub,DI_ftype_DIDI,2)
-// tag : M6_vabsdiffub
-def int_hexagon_M6_vabsdiffub :
-Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffub">;
+def int_hexagon_V6_valignbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignbi">;
-//
-// BUILTIN_INFO(HEXAGON.S6_vtrunehb_ppp,DI_ftype_DIDI,2)
-// tag : S6_vtrunehb_ppp
-def int_hexagon_S6_vtrunehb_ppp :
-Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
+def int_hexagon_V6_valignbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignbi_128B">;
-//
-// BUILTIN_INFO(HEXAGON.S6_vtrunohb_ppp,DI_ftype_DIDI,2)
-// tag : S6_vtrunohb_ppp
-def int_hexagon_S6_vtrunohb_ppp :
-Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
+def int_hexagon_V6_vaddwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwsat">;
-//
-// BUILTIN_INFO(HEXAGON.S6_vsplatrbp,DI_ftype_SI,1)
-// tag : S6_vsplatrbp
-def int_hexagon_S6_vsplatrbp :
-Hexagon_LLii_Intrinsic<"HEXAGON_S6_vsplatrbp">;
+def int_hexagon_V6_vaddwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrb,VI_ftype_VISI,2)
-// tag : V6_vlsrb
-def int_hexagon_V6_vlsrb :
-Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vlsrb">;
+def int_hexagon_V6_veqw_and :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_and">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlsrb_128B,VI_ftype_VISI,2)
-// tag : V6_vlsrb_128B
-def int_hexagon_V6_vlsrb_128B :
-Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrb_128B">;
+def int_hexagon_V6_veqw_and_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrwuhrndsat
-def int_hexagon_V6_vasrwuhrndsat :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;
+def int_hexagon_V6_vabsdiffub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrwuhrndsat_128B
-def int_hexagon_V6_vasrwuhrndsat_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;
+def int_hexagon_V6_vabsdiffub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasruwuhrndsat
-def int_hexagon_V6_vasruwuhrndsat :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;
+def int_hexagon_V6_vshuffeb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffeb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasruwuhrndsat_128B
-def int_hexagon_V6_vasruwuhrndsat_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;
+def int_hexagon_V6_vshuffeb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhbsat,VI_ftype_VIVISI,3)
-// tag : V6_vasrhbsat
-def int_hexagon_V6_vasrhbsat :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbsat">;
+def int_hexagon_V6_vabsdiffuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrhbsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrhbsat_128B
-def int_hexagon_V6_vasrhbsat_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;
+def int_hexagon_V6_vabsdiffuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrounduwuh,VI_ftype_VIVI,2)
-// tag : V6_vrounduwuh
-def int_hexagon_V6_vrounduwuh :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduwuh">;
+def int_hexagon_V6_veqw_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrounduwuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vrounduwuh_128B
-def int_hexagon_V6_vrounduwuh_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;
+def int_hexagon_V6_veqw_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrounduhub,VI_ftype_VIVI,2)
-// tag : V6_vrounduhub
-def int_hexagon_V6_vrounduhub :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduhub">;
+def int_hexagon_V6_vgth :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrounduhub_128B,VI_ftype_VIVI,2)
-// tag : V6_vrounduhub_128B
-def int_hexagon_V6_vrounduhub_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;
+def int_hexagon_V6_vgth_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduwsat,VI_ftype_VIVI,2)
-// tag : V6_vadduwsat
-def int_hexagon_V6_vadduwsat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vadduwsat">;
+def int_hexagon_V6_vgtuw_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduwsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vadduwsat_128B
-def int_hexagon_V6_vadduwsat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;
+def int_hexagon_V6_vgtuw_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vadduwsat_dv
-def int_hexagon_V6_vadduwsat_dv :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;
+def int_hexagon_V6_vgtb :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vadduwsat_dv_128B
-def int_hexagon_V6_vadduwsat_dv_128B :
-Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
+def int_hexagon_V6_vgtb_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuwsat,VI_ftype_VIVI,2)
-// tag : V6_vsubuwsat
-def int_hexagon_V6_vsubuwsat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuwsat">;
+def int_hexagon_V6_vgtw :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubuwsat_128B
-def int_hexagon_V6_vsubuwsat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;
+def int_hexagon_V6_vgtw_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubuwsat_dv
-def int_hexagon_V6_vsubuwsat_dv :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;
+def int_hexagon_V6_vsubwq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubuwsat_dv_128B
-def int_hexagon_V6_vsubuwsat_dv_128B :
-Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;
+def int_hexagon_V6_vsubwq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbsat,VI_ftype_VIVI,2)
-// tag : V6_vaddbsat
-def int_hexagon_V6_vaddbsat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddbsat">;
+def int_hexagon_V6_vnot :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnot">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddbsat_128B
-def int_hexagon_V6_vaddbsat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;
+def int_hexagon_V6_vnot_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnot_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vaddbsat_dv
-def int_hexagon_V6_vaddbsat_dv :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;
+def int_hexagon_V6_vgtb_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vaddbsat_dv_128B
-def int_hexagon_V6_vaddbsat_dv_128B :
-Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;
+def int_hexagon_V6_vgtb_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbsat,VI_ftype_VIVI,2)
-// tag : V6_vsubbsat
-def int_hexagon_V6_vsubbsat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubbsat">;
+def int_hexagon_V6_vgtuw_or :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_or">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbsat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubbsat_128B
-def int_hexagon_V6_vsubbsat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;
+def int_hexagon_V6_vgtuw_or_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv,VD_ftype_VDVD,2)
-// tag : V6_vsubbsat_dv
-def int_hexagon_V6_vsubbsat_dv :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;
+def int_hexagon_V6_vaddubsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv_128B,VD_ftype_VDVD,2)
-// tag : V6_vsubbsat_dv_128B
-def int_hexagon_V6_vsubbsat_dv_128B :
-Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;
+def int_hexagon_V6_vaddubsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat,VI_ftype_VIVI,2)
-// tag : V6_vaddububb_sat
-def int_hexagon_V6_vaddububb_sat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddububb_sat">;
+def int_hexagon_V6_vmaxw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddububb_sat_128B
-def int_hexagon_V6_vaddububb_sat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;
+def int_hexagon_V6_vmaxw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat,VI_ftype_VIVI,2)
-// tag : V6_vsubububb_sat
-def int_hexagon_V6_vsubububb_sat :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubububb_sat">;
+def int_hexagon_V6_vaslwv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslwv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat_128B,VI_ftype_VIVI,2)
-// tag : V6_vsubububb_sat_128B
-def int_hexagon_V6_vsubububb_sat_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;
+def int_hexagon_V6_vaslwv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vaddhw_acc
-def int_hexagon_V6_vaddhw_acc :
-Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw_acc">;
+def int_hexagon_V6_vabsw_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vaddhw_acc_128B
-def int_hexagon_V6_vaddhw_acc_128B :
-Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;
+def int_hexagon_V6_vabsw_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vadduhw_acc
-def int_hexagon_V6_vadduhw_acc :
-Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw_acc">;
+def int_hexagon_V6_vsubwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vadduhw_acc_128B
-def int_hexagon_V6_vadduhw_acc_128B :
-Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;
+def int_hexagon_V6_vsubwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vaddubh_acc
-def int_hexagon_V6_vaddubh_acc :
-Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh_acc">;
+def int_hexagon_V6_vroundhub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vaddubh_acc_128B
-def int_hexagon_V6_vaddubh_acc_128B :
-Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;
+def int_hexagon_V6_vroundhub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64,VD_ftype_VIVI,2)
-// tag : V6_vmpyewuh_64
-def int_hexagon_V6_vmpyewuh_64 :
-Hexagon_V62_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;
+def int_hexagon_V6_vdmpyhisat_acc :
+Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64_128B,VD_ftype_VIVI,2)
-// tag : V6_vmpyewuh_64_128B
-def int_hexagon_V6_vmpyewuh_64_128B :
-Hexagon_V62_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;
+def int_hexagon_V6_vdmpyhisat_acc_128B :
+Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyowh_64_acc
-def int_hexagon_V6_vmpyowh_64_acc :
-Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;
+def int_hexagon_V6_vmpabus :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc_128B,VD_ftype_VDVIVI,3)
-// tag : V6_vmpyowh_64_acc_128B
-def int_hexagon_V6_vmpyowh_64_acc_128B :
-Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;
+def int_hexagon_V6_vmpabus_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhb,VD_ftype_VDSI,2)
-// tag : V6_vmpauhb
-def int_hexagon_V6_vmpauhb :
-Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb">;
+def int_hexagon_V6_vassignp :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassignp">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhb_128B,VD_ftype_VDSI,2)
-// tag : V6_vmpauhb_128B
-def int_hexagon_V6_vmpauhb_128B :
-Hexagon_V62_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;
+def int_hexagon_V6_vassignp_128B :
+Hexagon_v64i32_v64i32_Intrinsic<"HEXAGON_V6_vassignp_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vmpauhb_acc
-def int_hexagon_V6_vmpauhb_acc :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;
+def int_hexagon_V6_veqb :
+Hexagon_v512i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vmpauhb_acc_128B
-def int_hexagon_V6_vmpauhb_acc_128B :
-Hexagon_V62_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;
+def int_hexagon_V6_veqb_128B :
+Hexagon_v1024i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwub,VI_ftype_VISI,2)
-// tag : V6_vmpyiwub
-def int_hexagon_V6_vmpyiwub :
-Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub">;
+def int_hexagon_V6_vsububh :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyiwub_128B
-def int_hexagon_V6_vmpyiwub_128B :
-Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;
+def int_hexagon_V6_vsububh_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwub_acc
-def int_hexagon_V6_vmpyiwub_acc :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;
+def int_hexagon_V6_lvsplatw :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vmpyiwub_acc_128B
-def int_hexagon_V6_vmpyiwub_acc_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;
+def int_hexagon_V6_lvsplatw_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandnqrt,VI_ftype_QVSI,2)
-// tag : V6_vandnqrt
-def int_hexagon_V6_vandnqrt :
-Hexagon_V62_v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt">;
+def int_hexagon_V6_vaddhnq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhnq">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandnqrt_128B,VI_ftype_QVSI,2)
-// tag : V6_vandnqrt_128B
-def int_hexagon_V6_vandnqrt_128B :
-Hexagon_V62_v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;
+def int_hexagon_V6_vaddhnq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
+
+def int_hexagon_V6_vdmpyhsusat :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
+
+def int_hexagon_V6_vdmpyhsusat_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
+
+def int_hexagon_V6_pred_not :
+Hexagon_v512i1_v512i1_Intrinsic<"HEXAGON_V6_pred_not">;
+
+def int_hexagon_V6_pred_not_128B :
+Hexagon_v1024i1_v1024i1_Intrinsic<"HEXAGON_V6_pred_not_128B">;
+
+def int_hexagon_V6_vlutvwh_oracc :
+Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
+
+def int_hexagon_V6_vlutvwh_oracc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
+
+def int_hexagon_V6_vmpyiewh_acc :
+Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
+
+def int_hexagon_V6_vmpyiewh_acc_128B :
+Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
+
+def int_hexagon_V6_vdealvdd :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdealvdd">;
+
+def int_hexagon_V6_vdealvdd_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
+
+def int_hexagon_V6_vavgw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgw">;
+
+def int_hexagon_V6_vavgw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgw_128B">;
+
+def int_hexagon_V6_vdmpyhsusat_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
+
+def int_hexagon_V6_vdmpyhsusat_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
+
+def int_hexagon_V6_vgtw_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_xor">;
+
+def int_hexagon_V6_vgtw_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
+
+def int_hexagon_V6_vtmpyhb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
+
+def int_hexagon_V6_vtmpyhb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
+
+def int_hexagon_V6_vaddhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw">;
+
+def int_hexagon_V6_vaddhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
+
+def int_hexagon_V6_vaddhq :
+Hexagon_v16i32_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhq">;
+
+def int_hexagon_V6_vaddhq_128B :
+Hexagon_v32i32_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
+
+def int_hexagon_V6_vrmpyubv :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv">;
+
+def int_hexagon_V6_vrmpyubv_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
+
+def int_hexagon_V6_vsubh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubh">;
+
+def int_hexagon_V6_vsubh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_128B">;
+
+def int_hexagon_V6_vrmpyubi :
+Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi">;
+
+def int_hexagon_V6_vrmpyubi_128B :
+Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_128B">;
+
+def int_hexagon_V6_vminw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminw">;
+
+def int_hexagon_V6_vminw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminw_128B">;
+
+def int_hexagon_V6_vmpyubv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
+
+def int_hexagon_V6_vmpyubv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
+
+def int_hexagon_V6_pred_xor :
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_pred_xor">;
+
+def int_hexagon_V6_pred_xor_128B :
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
+
+def int_hexagon_V6_veqb_xor :
+Hexagon_v512i1_v512i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_xor">;
+
+def int_hexagon_V6_veqb_xor_128B :
+Hexagon_v1024i1_v1024i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
+
+def int_hexagon_V6_vmpyiewuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
+
+def int_hexagon_V6_vmpyiewuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
+
+def int_hexagon_V6_vmpybusv_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
+
+def int_hexagon_V6_vmpybusv_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
+
+def int_hexagon_V6_vavguhrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguhrnd">;
+
+def int_hexagon_V6_vavguhrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
+
+def int_hexagon_V6_vmpyowh_rnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
+
+def int_hexagon_V6_vmpyowh_rnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
+
+def int_hexagon_V6_vsubwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwsat">;
+
+def int_hexagon_V6_vsubwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
+
+def int_hexagon_V6_vsubuhw :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhw">;
+
+def int_hexagon_V6_vsubuhw_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
+
+def int_hexagon_V6_vrmpybusi_acc :
+Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc">;
+
+def int_hexagon_V6_vrmpybusi_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B">;
+
+def int_hexagon_V6_vasrw :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrw">;
+
+def int_hexagon_V6_vasrw_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_128B">;
+
+def int_hexagon_V6_vasrh :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrh">;
+
+def int_hexagon_V6_vasrh_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_128B">;
+
+def int_hexagon_V6_vmpyuhv :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv">;
+
+def int_hexagon_V6_vmpyuhv_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
+
+def int_hexagon_V6_vasrhbrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
+
+def int_hexagon_V6_vasrhbrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
+
+def int_hexagon_V6_vsubuhsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
+
+def int_hexagon_V6_vsubuhsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
+
+def int_hexagon_V6_vabsdiffw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffw">;
+
+def int_hexagon_V6_vabsdiffw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
+
+// V62 HVX Instructions.
-//
-// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc,VI_ftype_VIQVSI,3)
-// tag : V6_vandnqrt_acc
def int_hexagon_V6_vandnqrt_acc :
-Hexagon_V62_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;
+Hexagon_v16i32_v16i32v512i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc_128B,VI_ftype_VIQVSI,3)
-// tag : V6_vandnqrt_acc_128B
def int_hexagon_V6_vandnqrt_acc_128B :
-Hexagon_V62_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;
+Hexagon_v32i32_v32i32v1024i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvqv,VI_ftype_QVVI,2)
-// tag : V6_vandvqv
-def int_hexagon_V6_vandvqv :
-Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvqv">;
+def int_hexagon_V6_vaddclbh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvqv_128B,VI_ftype_QVVI,2)
-// tag : V6_vandvqv_128B
-def int_hexagon_V6_vandvqv_128B :
-Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvqv_128B">;
+def int_hexagon_V6_vaddclbh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvnqv,VI_ftype_QVVI,2)
-// tag : V6_vandvnqv
-def int_hexagon_V6_vandvnqv :
-Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvnqv">;
+def int_hexagon_V6_vmpyowh_64_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vandvnqv_128B,VI_ftype_QVVI,2)
-// tag : V6_vandvnqv_128B
-def int_hexagon_V6_vandvnqv_128B :
-Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;
+def int_hexagon_V6_vmpyowh_64_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2,QV_ftype_SI,1)
-// tag : V6_pred_scalar2v2
-def int_hexagon_V6_pred_scalar2v2 :
-Hexagon_V62_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;
+def int_hexagon_V6_vmpyewuh_64 :
+Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;
-//
-// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2_128B,QV_ftype_SI,1)
-// tag : V6_pred_scalar2v2_128B
-def int_hexagon_V6_pred_scalar2v2_128B :
-Hexagon_V62_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;
+def int_hexagon_V6_vmpyewuh_64_128B :
+Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_shuffeqw,QV_ftype_QVQV,2)
-// tag : V6_shuffeqw
-def int_hexagon_V6_shuffeqw :
-Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqw">;
+def int_hexagon_V6_vsatuwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatuwuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_shuffeqw_128B,QV_ftype_QVQV,2)
-// tag : V6_shuffeqw_128B
-def int_hexagon_V6_shuffeqw_128B :
-Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;
+def int_hexagon_V6_vsatuwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_shuffeqh,QV_ftype_QVQV,2)
-// tag : V6_shuffeqh
def int_hexagon_V6_shuffeqh :
-Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqh">;
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_shuffeqh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_shuffeqh_128B,QV_ftype_QVQV,2)
-// tag : V6_shuffeqh_128B
def int_hexagon_V6_shuffeqh_128B :
-Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxb,VI_ftype_VIVI,2)
-// tag : V6_vmaxb
-def int_hexagon_V6_vmaxb :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxb">;
+def int_hexagon_V6_shuffeqw :
+Hexagon_v512i1_v512i1v512i1_Intrinsic<"HEXAGON_V6_shuffeqw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmaxb_128B,VI_ftype_VIVI,2)
-// tag : V6_vmaxb_128B
-def int_hexagon_V6_vmaxb_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxb_128B">;
+def int_hexagon_V6_shuffeqw_128B :
+Hexagon_v1024i1_v1024i1v1024i1_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;
+
+def int_hexagon_V6_ldcnpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnpnt0">;
+
+def int_hexagon_V6_ldcnpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnpnt0_128B">;
+
+def int_hexagon_V6_vsubcarry :
+Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic;
+
+def int_hexagon_V6_vsubcarry_128B :
+Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B;
+
+def int_hexagon_V6_vasrhbsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat">;
+
+def int_hexagon_V6_vasrhbsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminb,VI_ftype_VIVI,2)
-// tag : V6_vminb
def int_hexagon_V6_vminb :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vminb">;
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vminb_128B,VI_ftype_VIVI,2)
-// tag : V6_vminb_128B
def int_hexagon_V6_vminb_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminb_128B">;
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsatuwuh,VI_ftype_VIVI,2)
-// tag : V6_vsatuwuh
-def int_hexagon_V6_vsatuwuh :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsatuwuh">;
+def int_hexagon_V6_vmpauhb_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsatuwuh_128B,VI_ftype_VIVI,2)
-// tag : V6_vsatuwuh_128B
-def int_hexagon_V6_vsatuwuh_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;
+def int_hexagon_V6_vmpauhb_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplath,VI_ftype_SI,1)
-// tag : V6_lvsplath
-def int_hexagon_V6_lvsplath :
-Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplath">;
+def int_hexagon_V6_vaddhw_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplath_128B,VI_ftype_SI,1)
-// tag : V6_lvsplath_128B
-def int_hexagon_V6_lvsplath_128B :
-Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
+def int_hexagon_V6_vaddhw_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplatb,VI_ftype_SI,1)
-// tag : V6_lvsplatb
-def int_hexagon_V6_lvsplatb :
-Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplatb">;
+def int_hexagon_V6_vlsrb :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_lvsplatb_128B,VI_ftype_SI,1)
-// tag : V6_lvsplatb_128B
-def int_hexagon_V6_lvsplatb_128B :
-Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;
+def int_hexagon_V6_vlsrb_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddclbw,VI_ftype_VIVI,2)
-// tag : V6_vaddclbw
-def int_hexagon_V6_vaddclbw :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbw">;
+def int_hexagon_V6_vlutvwhi :
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddclbw_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddclbw_128B
-def int_hexagon_V6_vaddclbw_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;
+def int_hexagon_V6_vlutvwhi_128B :
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddclbh,VI_ftype_VIVI,2)
-// tag : V6_vaddclbh
-def int_hexagon_V6_vaddclbh :
-Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbh">;
+def int_hexagon_V6_vaddububb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddububb_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddclbh_128B,VI_ftype_VIVI,2)
-// tag : V6_vaddclbh_128B
-def int_hexagon_V6_vaddclbh_128B :
-Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;
+def int_hexagon_V6_vaddububb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvbi,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvbi
-def int_hexagon_V6_vlutvvbi :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvbi">;
+def int_hexagon_V6_vsubbsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvbi_128B,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvbi_128B
-def int_hexagon_V6_vlutvvbi_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvbi_128B">;
+def int_hexagon_V6_vsubbsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;
+
+def int_hexagon_V6_ldtp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtp0">;
+
+def int_hexagon_V6_ldtp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtp0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci,VI_ftype_VIVIVISI,4)
-// tag : V6_vlutvvb_oracci
def int_hexagon_V6_vlutvvb_oracci :
-Hexagon_V62_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci">;
+Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci_128B,VI_ftype_VIVIVISI,4)
-// tag : V6_vlutvvb_oracci_128B
def int_hexagon_V6_vlutvvb_oracci_128B :
-Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B">;
+Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwhi,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwhi
-def int_hexagon_V6_vlutvwhi :
-Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwhi">;
+def int_hexagon_V6_vsubuwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwhi_128B,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwhi_128B
-def int_hexagon_V6_vlutvwhi_128B :
-Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwhi_128B">;
+def int_hexagon_V6_vsubuwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci,VD_ftype_VDVIVISI,4)
-// tag : V6_vlutvwh_oracci
-def int_hexagon_V6_vlutvwh_oracci :
-Hexagon_V62_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci">;
+def int_hexagon_V6_ldpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldpnt0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci_128B,VD_ftype_VDVIVISI,4)
-// tag : V6_vlutvwh_oracci_128B
-def int_hexagon_V6_vlutvwh_oracci_128B :
-Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B">;
+def int_hexagon_V6_ldpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldpnt0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvb_nm
-def int_hexagon_V6_vlutvvb_nm :
-Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;
+def int_hexagon_V6_vandvnqv :
+Hexagon_v16i32_v512i1v16i32_Intrinsic<"HEXAGON_V6_vandvnqv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm_128B,VI_ftype_VIVISI,3)
-// tag : V6_vlutvvb_nm_128B
-def int_hexagon_V6_vlutvvb_nm_128B :
-Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;
+def int_hexagon_V6_vandvnqv_128B :
+Hexagon_v32i32_v1024i1v32i32_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;
+
+def int_hexagon_V6_lvsplatb :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb">;
+
+def int_hexagon_V6_lvsplatb_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;
+
+def int_hexagon_V6_lvsplath :
+Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplath">;
+
+def int_hexagon_V6_lvsplath_128B :
+Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplath_128B">;
+
+def int_hexagon_V6_ldtpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtpnt0">;
+
+def int_hexagon_V6_ldtpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtpnt0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwh_nm
def int_hexagon_V6_vlutvwh_nm :
-Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;
+Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm_128B,VD_ftype_VIVISI,3)
-// tag : V6_vlutvwh_nm_128B
def int_hexagon_V6_vlutvwh_nm_128B :
-Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;
-
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddcarry,VI_ftype_VIVIQV,3)
-// tag: V6_vaddcarry
-def int_hexagon_V6_vaddcarry :
-Hexagon_v512v64iv512v512v64i_Intrinsic<"HEXAGON_v6_vaddcarry">;
+Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaddcarry_128B,VI_ftype_VIVIQV,3)
-// tag: V6_vaddcarry_128B
-def int_hexagon_V6_vaddcarry_128B :
-Hexagon_v1024v128iv1024v1024v128i_Intrinsic<"HEXAGON_v6_vaddcarry_128B">;
+def int_hexagon_V6_ldnpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldnpnt0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubcarry,VI_ftype_VIVIQV,3)
-// tag: V6_vsubcarry
-def int_hexagon_V6_vsubcarry :
-Hexagon_v512v64iv512v512v64i_Intrinsic<"HEXAGON_v6_vsubcarry">;
+def int_hexagon_V6_ldnpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldnpnt0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vsubcarry_128B,VI_ftype_VIVIQV,3)
-// tag: V6_vsubcarry_128B
-def int_hexagon_V6_vsubcarry_128B :
-Hexagon_v1024v128iv1024v1024v128i_Intrinsic<"HEXAGON_v6_vsubcarry_128B">;
+def int_hexagon_V6_vmpauhb :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb">;
+def int_hexagon_V6_vmpauhb_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;
-///
-/// HexagonV65 intrinsics
-///
+def int_hexagon_V6_ldtnp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnp0">;
-//
-// Hexagon_V65_iLLiLLi_Intrinsic<string GCCIntSuffix>
-// tag : A6_vcmpbeq_notany
-class Hexagon_V65_iLLiLLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_ldtnp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnp0_128B">;
-//
-// Hexagon_V65_v1024v512LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyub_rtt
-class Hexagon_V65_v1024v512LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrounduhub :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduhub">;
-//
-// Hexagon_V65_v2048v1024LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyub_rtt_128B
-class Hexagon_V65_v2048v1024LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vrounduhub_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;
-//
-// Hexagon_V65_v1024v1024v512LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyub_rtt_acc
-class Hexagon_V65_v1024v1024v512LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vadduhw_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw_acc">;
-//
-// Hexagon_V65_v2048v2048v1024LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vrmpyub_rtt_acc_128B
-class Hexagon_V65_v2048v2048v1024LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vadduhw_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;
-//
-// Hexagon_V65_v512v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vasruwuhsat
-class Hexagon_V65_v512v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_ldcp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcp0">;
-//
-// Hexagon_V65_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vasruwuhsat_128B
-class Hexagon_V65_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_ldcp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcp0_128B">;
-//
-// Hexagon_V65_v512v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vavguw
-class Hexagon_V65_v512v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vadduwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduwsat">;
-//
-// Hexagon_V65_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vavguw_128B
-class Hexagon_V65_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vadduwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;
-//
-// Hexagon_V65_v512v512_Intrinsic<string GCCIntSuffix>
-// tag : V6_vabsb
-class Hexagon_V65_v512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_ldtnpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnpnt0">;
-//
-// Hexagon_V65_v1024v1024_Intrinsic<string GCCIntSuffix>
-// tag : V6_vabsb_128B
-class Hexagon_V65_v1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_ldtnpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldtnpnt0_128B">;
-//
-// Hexagon_V65_v1024v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpabuu
-class Hexagon_V65_v1024v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddbsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbsat">;
-//
-// Hexagon_V65_v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpabuu_128B
-class Hexagon_V65_v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddbsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;
-//
-// Hexagon_V65_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpabuu_acc_128B
-class Hexagon_V65_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vandnqrt :
+Hexagon_v16i32_v512i1i32_Intrinsic<"HEXAGON_V6_vandnqrt">;
-//
-// Hexagon_V65_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyh_acc
-class Hexagon_V65_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vandnqrt_128B :
+Hexagon_v32i32_v1024i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;
-//
-// Hexagon_V65_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyh_acc_128B
-class Hexagon_V65_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyiwub_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;
-//
-// Hexagon_V65_v512v512v512LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpahhsat
-class Hexagon_V65_v512v512v512LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmpyiwub_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;
-//
-// Hexagon_V65_v1024v1024v1024LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpahhsat_128B
-class Hexagon_V65_v1024v1024v1024LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaxb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxb">;
-//
-// Hexagon_V65_v512v512LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlut4
-class Hexagon_V65_v512v512LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vmaxb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxb_128B">;
-//
-// Hexagon_V65_v1024v1024LLi_Intrinsic<string GCCIntSuffix>
-// tag : V6_vlut4_128B
-class Hexagon_V65_v1024v1024LLi_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vandvqv :
+Hexagon_v16i32_v512i1v16i32_Intrinsic<"HEXAGON_V6_vandvqv">;
-//
-// Hexagon_V65_v512v512i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vmpyuhe
-class Hexagon_V65_v512v512i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vandvqv_128B :
+Hexagon_v32i32_v1024i1v32i32_Intrinsic<"HEXAGON_V6_vandvqv_128B">;
-//
-// Hexagon_V65_v512v64i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vprefixqb
-class Hexagon_V65_v512v64i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v16i32_ty], [llvm_v512i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddcarry :
+Hexagon_custom_v16i32v512i1_v16i32v16i32v512i1_Intrinsic;
-//
-// Hexagon_V65_v1024v128i_Intrinsic<string GCCIntSuffix>
-// tag : V6_vprefixqb_128B
-class Hexagon_V65_v1024v128i_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v32i32_ty], [llvm_v1024i1_ty],
- [IntrNoMem]>;
+def int_hexagon_V6_vaddcarry_128B :
+Hexagon_custom_v32i32v1024i1_v32i32v32i32v1024i1_Intrinsic_128B;
-//
-// BUILTIN_INFO(HEXAGON.A6_vcmpbeq_notany,QI_ftype_DIDI,2)
-// tag : A6_vcmpbeq_notany
-def int_hexagon_A6_vcmpbeq_notany :
-Hexagon_V65_iLLiLLi_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;
+def int_hexagon_V6_vasrwuhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;
-//
-// BUILTIN_INFO(HEXAGON.A6_vcmpbeq_notany_128B,QI_ftype_DIDI,2)
-// tag : A6_vcmpbeq_notany_128B
-def int_hexagon_A6_vcmpbeq_notany_128B :
-Hexagon_V65_iLLiLLi_Intrinsic<"HEXAGON_A6_vcmpbeq_notany_128B">;
+def int_hexagon_V6_vasrwuhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt,VD_ftype_VIDI,2)
-// tag : V6_vrmpyub_rtt
-def int_hexagon_V6_vrmpyub_rtt :
-Hexagon_V65_v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
+def int_hexagon_V6_vlutvvbi :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_128B,VD_ftype_VIDI,2)
-// tag : V6_vrmpyub_rtt_128B
-def int_hexagon_V6_vrmpyub_rtt_128B :
-Hexagon_V65_v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
+def int_hexagon_V6_vlutvvbi_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_acc,VD_ftype_VDVIDI,3)
-// tag : V6_vrmpyub_rtt_acc
-def int_hexagon_V6_vrmpyub_rtt_acc :
-Hexagon_V65_v1024v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
+def int_hexagon_V6_vsubuwsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuwsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpyub_rtt_acc_128B,VD_ftype_VDVIDI,3)
-// tag : V6_vrmpyub_rtt_acc_128B
-def int_hexagon_V6_vrmpyub_rtt_acc_128B :
-Hexagon_V65_v2048v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
+def int_hexagon_V6_vsubuwsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt,VD_ftype_VIDI,2)
-// tag : V6_vrmpybub_rtt
-def int_hexagon_V6_vrmpybub_rtt :
-Hexagon_V65_v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
+def int_hexagon_V6_vaddbsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_128B,VD_ftype_VIDI,2)
-// tag : V6_vrmpybub_rtt_128B
-def int_hexagon_V6_vrmpybub_rtt_128B :
-Hexagon_V65_v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
+def int_hexagon_V6_vaddbsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_acc,VD_ftype_VDVIDI,3)
-// tag : V6_vrmpybub_rtt_acc
-def int_hexagon_V6_vrmpybub_rtt_acc :
-Hexagon_V65_v1024v1024v512LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
+def int_hexagon_V6_ldnp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldnp0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vrmpybub_rtt_acc_128B,VD_ftype_VDVIDI,3)
-// tag : V6_vrmpybub_rtt_acc_128B
-def int_hexagon_V6_vrmpybub_rtt_acc_128B :
-Hexagon_V65_v2048v2048v1024LLi_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
+def int_hexagon_V6_ldnp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldnp0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruwuhsat,VI_ftype_VIVISI,3)
-// tag : V6_vasruwuhsat
-def int_hexagon_V6_vasruwuhsat :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruwuhsat">;
+def int_hexagon_V6_vasruwuhrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruwuhsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasruwuhsat_128B
-def int_hexagon_V6_vasruwuhsat_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;
+def int_hexagon_V6_vasruwuhrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruhubsat,VI_ftype_VIVISI,3)
-// tag : V6_vasruhubsat
-def int_hexagon_V6_vasruhubsat :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruhubsat">;
+def int_hexagon_V6_vrounduwuh :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduwuh">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruhubsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasruhubsat_128B
-def int_hexagon_V6_vasruhubsat_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;
+def int_hexagon_V6_vrounduwuh_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruhubrndsat,VI_ftype_VIVISI,3)
-// tag : V6_vasruhubrndsat
-def int_hexagon_V6_vasruhubrndsat :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;
+def int_hexagon_V6_vlutvvb_nm :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasruhubrndsat_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasruhubrndsat_128B
-def int_hexagon_V6_vasruhubrndsat_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;
+def int_hexagon_V6_vlutvvb_nm_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslh_acc,VI_ftype_VIVISI,3)
-// tag : V6_vaslh_acc
-def int_hexagon_V6_vaslh_acc :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vaslh_acc">;
+def int_hexagon_V6_pred_scalar2v2 :
+Hexagon_v512i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vaslh_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vaslh_acc_128B
-def int_hexagon_V6_vaslh_acc_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;
+def int_hexagon_V6_pred_scalar2v2_128B :
+Hexagon_v1024i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrh_acc,VI_ftype_VIVISI,3)
-// tag : V6_vasrh_acc
-def int_hexagon_V6_vasrh_acc :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrh_acc">;
+def int_hexagon_V6_ldp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldp0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vasrh_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vasrh_acc_128B
-def int_hexagon_V6_vasrh_acc_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;
+def int_hexagon_V6_ldp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldp0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguw,VI_ftype_VIVI,2)
-// tag : V6_vavguw
-def int_hexagon_V6_vavguw :
-Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavguw">;
+def int_hexagon_V6_vaddubh_acc :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguw_128B,VI_ftype_VIVI,2)
-// tag : V6_vavguw_128B
-def int_hexagon_V6_vavguw_128B :
-Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguw_128B">;
+def int_hexagon_V6_vaddubh_acc_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguwrnd,VI_ftype_VIVI,2)
-// tag : V6_vavguwrnd
-def int_hexagon_V6_vavguwrnd :
-Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavguwrnd">;
+def int_hexagon_V6_vaddclbw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavguwrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavguwrnd_128B
-def int_hexagon_V6_vavguwrnd_128B :
-Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;
+def int_hexagon_V6_vaddclbw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgb,VI_ftype_VIVI,2)
-// tag : V6_vavgb
-def int_hexagon_V6_vavgb :
-Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavgb">;
+def int_hexagon_V6_ldcpnt0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcpnt0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgb_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgb_128B
-def int_hexagon_V6_vavgb_128B :
-Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgb_128B">;
+def int_hexagon_V6_ldcpnt0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcpnt0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgbrnd,VI_ftype_VIVI,2)
-// tag : V6_vavgbrnd
-def int_hexagon_V6_vavgbrnd :
-Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vavgbrnd">;
+def int_hexagon_V6_vadduwsat_dv :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vavgbrnd_128B,VI_ftype_VIVI,2)
-// tag : V6_vavgbrnd_128B
-def int_hexagon_V6_vavgbrnd_128B :
-Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;
+def int_hexagon_V6_vadduwsat_dv_128B :
+Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgb,VI_ftype_VIVI,2)
-// tag : V6_vnavgb
-def int_hexagon_V6_vnavgb :
-Hexagon_V65_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgb">;
+def int_hexagon_V6_vmpyiwub :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vnavgb_128B,VI_ftype_VIVI,2)
-// tag : V6_vnavgb_128B
-def int_hexagon_V6_vnavgb_128B :
-Hexagon_V65_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
+def int_hexagon_V6_vmpyiwub_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsb,VI_ftype_VI,1)
-// tag : V6_vabsb
-def int_hexagon_V6_vabsb :
-Hexagon_V65_v512v512_Intrinsic<"HEXAGON_V6_vabsb">;
+def int_hexagon_V6_vsubububb_sat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubububb_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsb_128B,VI_ftype_VI,1)
-// tag : V6_vabsb_128B
-def int_hexagon_V6_vabsb_128B :
-Hexagon_V65_v1024v1024_Intrinsic<"HEXAGON_V6_vabsb_128B">;
+def int_hexagon_V6_vsubububb_sat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsb_sat,VI_ftype_VI,1)
-// tag : V6_vabsb_sat
-def int_hexagon_V6_vabsb_sat :
-Hexagon_V65_v512v512_Intrinsic<"HEXAGON_V6_vabsb_sat">;
+def int_hexagon_V6_ldcnp0 :
+Hexagon_v16i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnp0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vabsb_sat_128B,VI_ftype_VI,1)
-// tag : V6_vabsb_sat_128B
-def int_hexagon_V6_vabsb_sat_128B :
-Hexagon_V65_v1024v1024_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;
+def int_hexagon_V6_ldcnp0_128B :
+Hexagon_v32i32_i32i32_Intrinsic<"HEXAGON_V6_ldcnp0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuu,VD_ftype_VDSI,2)
-// tag : V6_vmpabuu
-def int_hexagon_V6_vmpabuu :
-Hexagon_V65_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabuu">;
+def int_hexagon_V6_vlutvwh_oracci :
+Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuu_128B,VD_ftype_VDSI,2)
-// tag : V6_vmpabuu_128B
-def int_hexagon_V6_vmpabuu_128B :
-Hexagon_V65_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;
+def int_hexagon_V6_vlutvwh_oracci_128B :
+Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuu_acc,VD_ftype_VDVDSI,3)
-// tag : V6_vmpabuu_acc
-def int_hexagon_V6_vmpabuu_acc :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;
+def int_hexagon_V6_vsubbsat :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpabuu_acc_128B,VD_ftype_VDVDSI,3)
-// tag : V6_vmpabuu_acc_128B
-def int_hexagon_V6_vmpabuu_acc_128B :
-Hexagon_V65_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;
+def int_hexagon_V6_vsubbsat_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyh_acc,VD_ftype_VDVISI,3)
-// tag : V6_vmpyh_acc
-def int_hexagon_V6_vmpyh_acc :
-Hexagon_V65_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyh_acc">;
+// V65 HVX Instructions.
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyh_acc_128B,VD_ftype_VDVISI,3)
-// tag : V6_vmpyh_acc_128B
-def int_hexagon_V6_vmpyh_acc_128B :
-Hexagon_V65_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;
+def int_hexagon_V6_vasruhubrndsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahhsat,VI_ftype_VIVIDI,3)
-// tag : V6_vmpahhsat
-def int_hexagon_V6_vmpahhsat :
-Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpahhsat">;
+def int_hexagon_V6_vasruhubrndsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpahhsat_128B,VI_ftype_VIVIDI,3)
-// tag : V6_vmpahhsat_128B
-def int_hexagon_V6_vmpahhsat_128B :
-Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;
+def int_hexagon_V6_vrmpybub_rtt :
+Hexagon_v32i32_v16i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhuhsat,VI_ftype_VIVIDI,3)
-// tag : V6_vmpauhuhsat
-def int_hexagon_V6_vmpauhuhsat :
-Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;
+def int_hexagon_V6_vrmpybub_rtt_128B :
+Hexagon_v64i32_v32i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpauhuhsat_128B,VI_ftype_VIVIDI,3)
-// tag : V6_vmpauhuhsat_128B
-def int_hexagon_V6_vmpauhuhsat_128B :
-Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;
+def int_hexagon_V6_vmpahhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpsuhuhsat,VI_ftype_VIVIDI,3)
-// tag : V6_vmpsuhuhsat
-def int_hexagon_V6_vmpsuhuhsat :
-Hexagon_V65_v512v512v512LLi_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;
+def int_hexagon_V6_vmpahhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpsuhuhsat_128B,VI_ftype_VIVIDI,3)
-// tag : V6_vmpsuhuhsat_128B
-def int_hexagon_V6_vmpsuhuhsat_128B :
-Hexagon_V65_v1024v1024v1024LLi_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;
+def int_hexagon_V6_vavguwrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguwrnd">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlut4,VI_ftype_VIDI,2)
-// tag : V6_vlut4
-def int_hexagon_V6_vlut4 :
-Hexagon_V65_v512v512LLi_Intrinsic<"HEXAGON_V6_vlut4">;
+def int_hexagon_V6_vavguwrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vlut4_128B,VI_ftype_VIDI,2)
-// tag : V6_vlut4_128B
-def int_hexagon_V6_vlut4_128B :
-Hexagon_V65_v1024v1024LLi_Intrinsic<"HEXAGON_V6_vlut4_128B">;
+def int_hexagon_V6_vnavgb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhe,VI_ftype_VISI,2)
-// tag : V6_vmpyuhe
-def int_hexagon_V6_vmpyuhe :
-Hexagon_V65_v512v512i_Intrinsic<"HEXAGON_V6_vmpyuhe">;
+def int_hexagon_V6_vnavgb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_128B,VI_ftype_VISI,2)
-// tag : V6_vmpyuhe_128B
-def int_hexagon_V6_vmpyuhe_128B :
-Hexagon_V65_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;
+def int_hexagon_V6_vasrh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_acc,VI_ftype_VIVISI,3)
-// tag : V6_vmpyuhe_acc
-def int_hexagon_V6_vmpyuhe_acc :
-Hexagon_V65_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;
+def int_hexagon_V6_vasrh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vmpyuhe_acc_128B,VI_ftype_VIVISI,3)
-// tag : V6_vmpyuhe_acc_128B
-def int_hexagon_V6_vmpyuhe_acc_128B :
-Hexagon_V65_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;
+def int_hexagon_V6_vmpauhuhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqb,VI_ftype_QV,1)
-// tag : V6_vprefixqb
-def int_hexagon_V6_vprefixqb :
-Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqb">;
+def int_hexagon_V6_vmpauhuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqb_128B,VI_ftype_QV,1)
-// tag : V6_vprefixqb_128B
-def int_hexagon_V6_vprefixqb_128B :
-Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;
+def int_hexagon_V6_vmpyh_acc :
+Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqh,VI_ftype_QV,1)
-// tag : V6_vprefixqh
-def int_hexagon_V6_vprefixqh :
-Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqh">;
+def int_hexagon_V6_vmpyh_acc_128B :
+Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqh_128B,VI_ftype_QV,1)
-// tag : V6_vprefixqh_128B
-def int_hexagon_V6_vprefixqh_128B :
-Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;
+def int_hexagon_V6_vrmpybub_rtt_acc :
+Hexagon_v32i32_v32i32v16i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqw,VI_ftype_QV,1)
-// tag : V6_vprefixqw
-def int_hexagon_V6_vprefixqw :
-Hexagon_V65_v512v64i_Intrinsic<"HEXAGON_V6_vprefixqw">;
+def int_hexagon_V6_vrmpybub_rtt_acc_128B :
+Hexagon_v64i32_v64i32v32i32i64_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vprefixqw_128B,VI_ftype_QV,1)
-// tag : V6_vprefixqw_128B
-def int_hexagon_V6_vprefixqw_128B :
-Hexagon_V65_v1024v128i_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;
+def int_hexagon_V6_vavgb :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgb">;
+def int_hexagon_V6_vavgb_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgb_128B">;
-// The scatter/gather ones below will not be generated from iset.py. Make sure
-// you don't overwrite these.
-class Hexagon_V65_vvmemiiv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v16i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vaslh_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc">;
-class Hexagon_V65_vvmemiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vaslh_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;
-class Hexagon_V65_vvmemiiv2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,
- llvm_v64i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vavguw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguw">;
-class Hexagon_V65_vvmemv64iiiv512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v16i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vavguw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguw_128B">;
-class Hexagon_V65_vvmemv128iiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vlut4 :
+Hexagon_v16i32_v16i32i64_Intrinsic<"HEXAGON_V6_vlut4">;
-class Hexagon_V65_vvmemv64iiiv1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vlut4_128B :
+Hexagon_v32i32_v32i32i64_Intrinsic<"HEXAGON_V6_vlut4_128B">;
-class Hexagon_V65_vvmemv128iiiv2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_ptr_ty,llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v64i32_ty],
- [IntrArgMemOnly]>;
+def int_hexagon_V6_vmpyuhe_acc :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;
-def int_hexagon_V6_vgathermw :
-Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermw">;
+def int_hexagon_V6_vmpyuhe_acc_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;
-def int_hexagon_V6_vgathermw_128B :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermw_128B">;
+def int_hexagon_V6_vrmpyub_rtt :
+Hexagon_v32i32_v16i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
-def int_hexagon_V6_vgathermh :
-Hexagon_V65_vvmemiiv512_Intrinsic<"HEXAGON_V6_vgathermh">;
+def int_hexagon_V6_vrmpyub_rtt_128B :
+Hexagon_v64i32_v32i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
-def int_hexagon_V6_vgathermh_128B :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermh_128B">;
+def int_hexagon_V6_vmpsuhuhsat :
+Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;
-def int_hexagon_V6_vgathermhw :
-Hexagon_V65_vvmemiiv1024_Intrinsic<"HEXAGON_V6_vgathermhw">;
+def int_hexagon_V6_vmpsuhuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;
-def int_hexagon_V6_vgathermhw_128B :
-Hexagon_V65_vvmemiiv2048_Intrinsic<"HEXAGON_V6_vgathermhw_128B">;
+def int_hexagon_V6_vasruhubsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat">;
-def int_hexagon_V6_vgathermwq :
-Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermwq">;
+def int_hexagon_V6_vasruhubsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;
-def int_hexagon_V6_vgathermwq_128B :
-Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermwq_128B">;
+def int_hexagon_V6_vmpyuhe :
+Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe">;
-def int_hexagon_V6_vgathermhq :
-Hexagon_V65_vvmemv64iiiv512_Intrinsic<"HEXAGON_V6_vgathermhq">;
+def int_hexagon_V6_vmpyuhe_128B :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;
-def int_hexagon_V6_vgathermhq_128B :
-Hexagon_V65_vvmemv128iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhq_128B">;
+def int_hexagon_V6_vrmpyub_rtt_acc :
+Hexagon_v32i32_v32i32v16i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
-def int_hexagon_V6_vgathermhwq :
-Hexagon_V65_vvmemv64iiiv1024_Intrinsic<"HEXAGON_V6_vgathermhwq">;
+def int_hexagon_V6_vrmpyub_rtt_acc_128B :
+Hexagon_v64i32_v64i32v32i32i64_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
-def int_hexagon_V6_vgathermhwq_128B :
-Hexagon_V65_vvmemv128iiiv2048_Intrinsic<"HEXAGON_V6_vgathermhwq_128B">;
+def int_hexagon_V6_vasruwuhsat :
+Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat">;
-class Hexagon_V65_viiv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v16i32_ty,llvm_v16i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vasruwuhsat_128B :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;
-class Hexagon_V65_viiv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty,llvm_v32i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vmpabuu_acc :
+Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;
-class Hexagon_V65_vv64iiiv512v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v16i32_ty,
- llvm_v16i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vmpabuu_acc_128B :
+Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;
-class Hexagon_V65_vv128iiiv1024v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty,
- llvm_v32i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vprefixqw :
+Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqw">;
-class Hexagon_V65_viiv1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v32i32_ty,llvm_v16i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vprefixqw_128B :
+Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;
-class Hexagon_V65_viiv2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_i32_ty,llvm_i32_ty,
- llvm_v64i32_ty,llvm_v32i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vprefixqh :
+Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqh">;
-class Hexagon_V65_vv64iiiv1024v512_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v512i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v32i32_ty,
- llvm_v16i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vprefixqh_128B :
+Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;
-class Hexagon_V65_vv128iiiv2048v1024_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [], [llvm_v1024i1_ty,llvm_i32_ty,
- llvm_i32_ty,llvm_v64i32_ty,
- llvm_v32i32_ty],
- [IntrWriteMem]>;
+def int_hexagon_V6_vprefixqb :
+Hexagon_v16i32_v512i1_Intrinsic<"HEXAGON_V6_vprefixqb">;
-class Hexagon_V65_v2048_Intrinsic<string GCCIntSuffix>
- : Hexagon_Intrinsic<GCCIntSuffix,
- [llvm_v64i32_ty], [],
- [IntrNoMem]>;
+def int_hexagon_V6_vprefixqb_128B :
+Hexagon_v32i32_v1024i1_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw
-def int_hexagon_V6_vscattermw :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw">;
+def int_hexagon_V6_vabsb :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_128B
-def int_hexagon_V6_vscattermw_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_128B">;
+def int_hexagon_V6_vabsb_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh
-def int_hexagon_V6_vscattermh :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh">;
+def int_hexagon_V6_vavgbrnd :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgbrnd">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_128B
-def int_hexagon_V6_vscattermh_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_128B">;
+def int_hexagon_V6_vavgbrnd_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_add,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_add
-def int_hexagon_V6_vscattermw_add :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermw_add">;
+def int_hexagon_V6_vdd0 :
+Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vdd0">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermw_add_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermw_add_128B
-def int_hexagon_V6_vscattermw_add_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermw_add_128B">;
+def int_hexagon_V6_vdd0_128B :
+Hexagon_v64i32__Intrinsic<"HEXAGON_V6_vdd0_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_add,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_add
-def int_hexagon_V6_vscattermh_add :
-Hexagon_V65_viiv512v512_Intrinsic<"HEXAGON_V6_vscattermh_add">;
+def int_hexagon_V6_vmpabuu :
+Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermh_add_128B,v_ftype_SISIVIVI,4)
-// tag : V6_vscattermh_add_128B
-def int_hexagon_V6_vscattermh_add_128B :
-Hexagon_V65_viiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermh_add_128B">;
+def int_hexagon_V6_vmpabuu_128B :
+Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermwq,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermwq
-def int_hexagon_V6_vscattermwq :
-Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermwq">;
+def int_hexagon_V6_vabsb_sat :
+Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb_sat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermwq_128B,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermwq_128B
-def int_hexagon_V6_vscattermwq_128B :
-Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermwq_128B">;
+def int_hexagon_V6_vabsb_sat_128B :
+Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhq,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermhq
-def int_hexagon_V6_vscattermhq :
-Hexagon_V65_vv64iiiv512v512_Intrinsic<"HEXAGON_V6_vscattermhq">;
+// V66 HVX Instructions.
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhq_128B,v_ftype_QVSISIVIVI,5)
-// tag : V6_vscattermhq_128B
-def int_hexagon_V6_vscattermhq_128B :
-Hexagon_V65_vv128iiiv1024v1024_Intrinsic<"HEXAGON_V6_vscattermhq_128B">;
+def int_hexagon_V6_vaddcarrysat :
+Hexagon_v16i32_v16i32v16i32v512i1_Intrinsic<"HEXAGON_V6_vaddcarrysat">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw
-def int_hexagon_V6_vscattermhw :
-Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw">;
+def int_hexagon_V6_vaddcarrysat_128B :
+Hexagon_v32i32_v32i32v32i32v1024i1_Intrinsic<"HEXAGON_V6_vaddcarrysat_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_128B,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_128B
-def int_hexagon_V6_vscattermhw_128B :
-Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_128B">;
+def int_hexagon_V6_vasr_into :
+Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vasr_into">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhwq,v_ftype_QVSISIVDVI,5)
-// tag : V6_vscattermhwq
-def int_hexagon_V6_vscattermhwq :
-Hexagon_V65_vv64iiiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhwq">;
+def int_hexagon_V6_vasr_into_128B :
+Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vasr_into_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhwq_128B,v_ftype_QVSISIVDVI,5)
-// tag : V6_vscattermhwq_128B
-def int_hexagon_V6_vscattermhwq_128B :
-Hexagon_V65_vv128iiiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhwq_128B">;
+def int_hexagon_V6_vsatdw :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatdw">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_add
-def int_hexagon_V6_vscattermhw_add :
-Hexagon_V65_viiv1024v512_Intrinsic<"HEXAGON_V6_vscattermhw_add">;
+def int_hexagon_V6_vsatdw_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatdw_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vscattermhw_add_128B,v_ftype_SISIVDVI,4)
-// tag : V6_vscattermhw_add_128B
-def int_hexagon_V6_vscattermhw_add_128B :
-Hexagon_V65_viiv2048v1024_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B">;
+def int_hexagon_V6_vrotr :
+Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrotr">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdd0,VD_ftype_,0)
-// tag : V6_vdd0
-def int_hexagon_V6_vdd0 :
-Hexagon_v1024_Intrinsic<"HEXAGON_V6_vdd0">;
+def int_hexagon_V6_vrotr_128B :
+Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrotr_128B">;
-//
-// BUILTIN_INFO(HEXAGON.V6_vdd0_128B,VD_ftype_,0)
-// tag : V6_vdd0_128B
-def int_hexagon_V6_vdd0_128B :
-Hexagon_V65_v2048_Intrinsic<"HEXAGON_V6_vdd0_128B">;
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index 3433aaa402eb..62b2e8f77e7d 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -83,6 +83,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_fmaf128_round_to_odd
: GCCBuiltin<"__builtin_fmaf128_round_to_odd">,
Intrinsic <[llvm_f128_ty], [llvm_f128_ty,llvm_f128_ty,llvm_f128_ty], [IntrNoMem]>;
+ def int_ppc_scalar_extract_expq
+ : GCCBuiltin<"__builtin_vsx_scalar_extract_expq">,
+ Intrinsic <[llvm_i64_ty], [llvm_f128_ty], [IntrNoMem]>;
+ def int_ppc_scalar_insert_exp_qp
+ : GCCBuiltin<"__builtin_vsx_scalar_insert_exp_qp">,
+ Intrinsic <[llvm_f128_ty], [llvm_f128_ty, llvm_i64_ty], [IntrNoMem]>;
}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsRISCV.td b/contrib/llvm/include/llvm/IR/IntrinsicsRISCV.td
new file mode 100644
index 000000000000..0ac7348b56db
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -0,0 +1,44 @@
+//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the RISCV-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "riscv" in {
+
+//===----------------------------------------------------------------------===//
+// Atomics
+
+class MaskedAtomicRMW32Intrinsic
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>]>;
+
+class MaskedAtomicRMW32WithSextIntrinsic
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_riscv_masked_atomicrmw_xchg_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_add_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_sub_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_nand_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_max_i32 : MaskedAtomicRMW32WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_min_i32 : MaskedAtomicRMW32WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_umax_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_umin_i32 : MaskedAtomicRMW32Intrinsic;
+
+def int_riscv_masked_cmpxchg_i32
+ : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>]>;
+
+} // TargetPrefix = "riscv"
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index 7afc755a1e37..b015650906e0 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -24,17 +24,16 @@ def int_wasm_memory_grow : Intrinsic<[llvm_anyint_ty],
[llvm_i32_ty, LLVMMatchType<0>],
[]>;
-// These are the old names.
-def int_wasm_mem_size : Intrinsic<[llvm_anyint_ty],
- [llvm_i32_ty],
- [IntrReadMem]>;
-def int_wasm_mem_grow : Intrinsic<[llvm_anyint_ty],
- [llvm_i32_ty, LLVMMatchType<0>],
- []>;
+//===----------------------------------------------------------------------===//
+// Saturating float-to-int conversions
+//===----------------------------------------------------------------------===//
-// These are the old old names. They also lack the immediate field.
-def int_wasm_current_memory : Intrinsic<[llvm_anyint_ty], [], [IntrReadMem]>;
-def int_wasm_grow_memory : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], []>;
+def int_wasm_trunc_saturate_signed : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_trunc_saturate_unsigned : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty],
+ [IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Exception handling intrinsics
@@ -60,8 +59,57 @@ def int_wasm_catch : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty],
// WebAssembly EH must maintain the landingpads in the order assigned to them
// by WasmEHPrepare pass to generate landingpad table in EHStreamer. This is
// used in order to give them the indices in WasmEHPrepare.
-def int_wasm_landingpad_index: Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
+def int_wasm_landingpad_index: Intrinsic<[], [llvm_token_ty, llvm_i32_ty],
+ [IntrNoMem]>;
// Returns LSDA address of the current function.
def int_wasm_lsda : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-}
+
+//===----------------------------------------------------------------------===//
+// Atomic intrinsics
+//===----------------------------------------------------------------------===//
+
+// wait / notify
+def int_wasm_atomic_wait_i32 :
+ Intrinsic<[llvm_i32_ty],
+ [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
+ [IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
+ IntrHasSideEffects],
+ "", [SDNPMemOperand]>;
+def int_wasm_atomic_wait_i64 :
+ Intrinsic<[llvm_i32_ty],
+ [LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
+ [IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
+ IntrHasSideEffects],
+ "", [SDNPMemOperand]>;
+def int_wasm_atomic_notify:
+ Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [IntrInaccessibleMemOnly, NoCapture<0>, IntrHasSideEffects], "",
+ [SDNPMemOperand]>;
+
+//===----------------------------------------------------------------------===//
+// SIMD intrinsics
+//===----------------------------------------------------------------------===//
+
+def int_wasm_sub_saturate_signed :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_sub_saturate_unsigned :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_bitselect :
+ Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_anytrue :
+ Intrinsic<[llvm_i32_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_alltrue :
+ Intrinsic<[llvm_i32_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+
+} // TargetPrefix = "wasm"
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsX86.td b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
index 905afc130d8f..8d8cc8e97678 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -27,12 +27,6 @@ let TargetPrefix = "x86" in {
// Marks the EH guard slot node created in LLVM IR prior to code generation.
def int_x86_seh_ehguard : Intrinsic<[], [llvm_ptr_ty], []>;
-
- // Given a pointer to the end of an EH registration object, returns the true
- // parent frame address that can be used with llvm.localrecover.
- def int_x86_seh_recoverfp : Intrinsic<[llvm_ptr_ty],
- [llvm_ptr_ty, llvm_ptr_ty],
- [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
@@ -53,8 +47,8 @@ let TargetPrefix = "x86" in {
let TargetPrefix = "x86" in {
def int_x86_rdtsc : GCCBuiltin<"__builtin_ia32_rdtsc">,
Intrinsic<[llvm_i64_ty], [], []>;
- def int_x86_rdtscp : GCCBuiltin<"__builtin_ia32_rdtscp">,
- Intrinsic<[llvm_i64_ty], [llvm_ptr_ty], [IntrArgMemOnly]>;
+ def int_x86_rdtscp :
+ Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
}
// Read Performance-Monitoring Counter.
@@ -364,30 +358,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Integer arithmetic ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem, Commutative]>;
- def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem, Commutative]>;
- def int_x86_sse2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem, Commutative]>;
- def int_x86_sse2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem, Commutative]>;
- def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem]>;
- def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem]>;
- def int_x86_sse2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem]>;
- def int_x86_sse2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem, Commutative]>;
@@ -1336,21 +1306,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// BITALG bits shuffle
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mask_vpshufbitqmb_128 :
- GCCBuiltin<"__builtin_ia32_vpshufbitqmb128_mask">,
- Intrinsic<[llvm_i16_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
- [IntrNoMem]>;
- def int_x86_avx512_mask_vpshufbitqmb_256 :
- GCCBuiltin<"__builtin_ia32_vpshufbitqmb256_mask">,
- Intrinsic<[llvm_i32_ty],
- [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_mask_vpshufbitqmb_512 :
- GCCBuiltin<"__builtin_ia32_vpshufbitqmb512_mask">,
- Intrinsic<[llvm_i64_ty],
- [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
- [IntrNoMem]>;
+ def int_x86_avx512_vpshufbitqmb_128 :
+ Intrinsic<[llvm_v16i1_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_vpshufbitqmb_256 :
+ Intrinsic<[llvm_v32i1_ty], [llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_vpshufbitqmb_512 :
+ Intrinsic<[llvm_v64i1_ty], [llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
@@ -1358,30 +1319,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Integer arithmetic ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
- llvm_v32i8_ty], [IntrNoMem, Commutative]>;
- def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty], [IntrNoMem, Commutative]>;
- def int_x86_avx2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb256">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
- llvm_v32i8_ty], [IntrNoMem, Commutative]>;
- def int_x86_avx2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw256">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty], [IntrNoMem, Commutative]>;
- def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
- llvm_v32i8_ty], [IntrNoMem]>;
- def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty], [IntrNoMem]>;
- def int_x86_avx2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb256">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
- llvm_v32i8_ty], [IntrNoMem]>;
- def int_x86_avx2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw256">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty], [IntrNoMem]>;
def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
llvm_v16i16_ty], [IntrNoMem, Commutative]>;
@@ -1518,18 +1455,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmultishift_qb_128:
- GCCBuiltin<"__builtin_ia32_vpmultishiftqb128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmultishift_qb_256:
- GCCBuiltin<"__builtin_ia32_vpmultishiftqb256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmultishift_qb_512:
- GCCBuiltin<"__builtin_ia32_vpmultishiftqb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_pmultishift_qb_128:
+ GCCBuiltin<"__builtin_ia32_vpmultishiftqb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pmultishift_qb_256:
+ GCCBuiltin<"__builtin_ia32_vpmultishiftqb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pmultishift_qb_512:
+ GCCBuiltin<"__builtin_ia32_vpmultishiftqb512">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>;
}
// Pack ops.
@@ -1739,83 +1673,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_psrav_w_512 : GCCBuiltin<"__builtin_ia32_psrav32hi">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
[IntrNoMem]>;
-
- def int_x86_avx512_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty], [IntrNoMem]>;
- def int_x86_avx512_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty], [IntrNoMem]>;
- def int_x86_avx512_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
-
- def int_x86_avx512_prol_d_128 : GCCBuiltin<"__builtin_ia32_prold128">,
- Intrinsic<[llvm_v4i32_ty] , [llvm_v4i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prol_d_256 : GCCBuiltin<"__builtin_ia32_prold256">,
- Intrinsic<[llvm_v8i32_ty] , [llvm_v8i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prol_d_512 : GCCBuiltin<"__builtin_ia32_prold512">,
- Intrinsic<[llvm_v16i32_ty] , [llvm_v16i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prol_q_128 : GCCBuiltin<"__builtin_ia32_prolq128">,
- Intrinsic<[llvm_v2i64_ty] , [llvm_v2i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prol_q_256 : GCCBuiltin<"__builtin_ia32_prolq256">,
- Intrinsic<[llvm_v4i64_ty] , [llvm_v4i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prol_q_512 : GCCBuiltin<"__builtin_ia32_prolq512">,
- Intrinsic<[llvm_v8i64_ty] , [llvm_v8i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
-
-
- def int_x86_avx512_prolv_d_128 : GCCBuiltin<"__builtin_ia32_prolvd128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prolv_d_256 : GCCBuiltin<"__builtin_ia32_prolvd256">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prolv_d_512 : GCCBuiltin<"__builtin_ia32_prolvd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_prolv_q_128 : GCCBuiltin<"__builtin_ia32_prolvq128">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty], [IntrNoMem]>;
- def int_x86_avx512_prolv_q_256 : GCCBuiltin<"__builtin_ia32_prolvq256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty], [IntrNoMem]>;
- def int_x86_avx512_prolv_q_512 : GCCBuiltin<"__builtin_ia32_prolvq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_d_128 : GCCBuiltin<"__builtin_ia32_prord128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_d_256 : GCCBuiltin<"__builtin_ia32_prord256">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_d_512 : GCCBuiltin<"__builtin_ia32_prord512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_q_128 : GCCBuiltin<"__builtin_ia32_prorq128">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_q_256 : GCCBuiltin<"__builtin_ia32_prorq256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pror_q_512 : GCCBuiltin<"__builtin_ia32_prorq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_i32_ty], [IntrNoMem]>;
-
}
// Gather ops
@@ -2187,32 +2044,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
-
- def int_x86_xop_vprotb : GCCBuiltin<"__builtin_ia32_vprotb">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotd : GCCBuiltin<"__builtin_ia32_vprotd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotq : GCCBuiltin<"__builtin_ia32_vprotq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotw : GCCBuiltin<"__builtin_ia32_vprotw">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotbi : GCCBuiltin<"__builtin_ia32_vprotbi">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotdi : GCCBuiltin<"__builtin_ia32_vprotdi">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotqi : GCCBuiltin<"__builtin_ia32_vprotqi">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
- [IntrNoMem]>;
- def int_x86_xop_vprotwi : GCCBuiltin<"__builtin_ia32_vprotwi">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
def int_x86_xop_vpshab :
GCCBuiltin<"__builtin_ia32_vpshab">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
@@ -2750,24 +2581,18 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// ADX
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_addcarryx_u32: GCCBuiltin<"__builtin_ia32_addcarryx_u32">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
- def int_x86_addcarryx_u64: GCCBuiltin<"__builtin_ia32_addcarryx_u64">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
- def int_x86_addcarry_u32: GCCBuiltin<"__builtin_ia32_addcarry_u32">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
- def int_x86_addcarry_u64: GCCBuiltin<"__builtin_ia32_addcarry_u64">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
- def int_x86_subborrow_u32: GCCBuiltin<"__builtin_ia32_subborrow_u32">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
- def int_x86_subborrow_u64: GCCBuiltin<"__builtin_ia32_subborrow_u64">,
- Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
- llvm_ptr_ty], [IntrArgMemOnly]>;
+ def int_x86_addcarry_32:
+ Intrinsic<[llvm_i8_ty, llvm_i32_ty],
+ [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_addcarry_64:
+ Intrinsic<[llvm_i8_ty, llvm_i64_ty],
+ [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_subborrow_32:
+ Intrinsic<[llvm_i8_ty, llvm_i32_ty],
+ [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_subborrow_64:
+ Intrinsic<[llvm_i8_ty, llvm_i64_ty],
+ [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
@@ -2787,6 +2612,36 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
//===----------------------------------------------------------------------===//
// AVX512
+// Mask ops
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_kadd_b :
+ Intrinsic<[llvm_v8i1_ty], [llvm_v8i1_ty, llvm_v8i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_kadd_w :
+ Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_kadd_d :
+ Intrinsic<[llvm_v32i1_ty], [llvm_v32i1_ty, llvm_v32i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_kadd_q :
+ Intrinsic<[llvm_v64i1_ty], [llvm_v64i1_ty, llvm_v64i1_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_ktestc_b :
+ Intrinsic<[llvm_i32_ty], [llvm_v8i1_ty, llvm_v8i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestc_w :
+ Intrinsic<[llvm_i32_ty], [llvm_v16i1_ty, llvm_v16i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestc_d :
+ Intrinsic<[llvm_i32_ty], [llvm_v32i1_ty, llvm_v32i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestc_q :
+ Intrinsic<[llvm_i32_ty], [llvm_v64i1_ty, llvm_v64i1_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_ktestz_b :
+ Intrinsic<[llvm_i32_ty], [llvm_v8i1_ty, llvm_v8i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestz_w :
+ Intrinsic<[llvm_i32_ty], [llvm_v16i1_ty, llvm_v16i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestz_d :
+ Intrinsic<[llvm_i32_ty], [llvm_v32i1_ty, llvm_v32i1_ty], [IntrNoMem]>;
+ def int_x86_avx512_ktestz_q :
+ Intrinsic<[llvm_i32_ty], [llvm_v64i1_ty, llvm_v64i1_ty], [IntrNoMem]>;
+}
+
// Conversion ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_cvttss2si : GCCBuiltin<"__builtin_ia32_vcvttss2si32">,
@@ -3677,78 +3532,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
// Integer arithmetic ops
let TargetPrefix = "x86" in {
- def int_x86_avx512_mask_padds_b_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padds_b_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padds_b_512 : GCCBuiltin<"__builtin_ia32_paddsb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padds_w_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padds_w_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padds_w_512 : GCCBuiltin<"__builtin_ia32_paddsw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_b_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_b_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_b_512 : GCCBuiltin<"__builtin_ia32_paddusb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_w_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_w_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_paddus_w_512 : GCCBuiltin<"__builtin_ia32_paddusw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_b_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_b_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_b_512 : GCCBuiltin<"__builtin_ia32_psubsb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_w_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_w_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubs_w_512 : GCCBuiltin<"__builtin_ia32_psubsw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_b_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_b_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_b_512 : GCCBuiltin<"__builtin_ia32_psubusb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_w_128 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_w_256 : // FIXME: remove this intrinsic
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psubus_w_512 : GCCBuiltin<"__builtin_ia32_psubusw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
llvm_v32i16_ty], [IntrNoMem, Commutative]>;
@@ -3780,6 +3563,7 @@ let TargetPrefix = "x86" in {
// Gather and Scatter ops
let TargetPrefix = "x86" in {
+ // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
def int_x86_avx512_gather_dpd_512 : GCCBuiltin<"__builtin_ia32_gathersiv8df">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
@@ -3912,6 +3696,7 @@ let TargetPrefix = "x86" in {
[IntrReadMem, IntrArgMemOnly]>;
// scatter
+ // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
def int_x86_avx512_scatter_dpd_512 : GCCBuiltin<"__builtin_ia32_scattersiv8df">,
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
@@ -4072,6 +3857,239 @@ let TargetPrefix = "x86" in {
llvm_i32_ty, llvm_i32_ty], [IntrArgMemOnly]>;
}
+// AVX512 gather/scatter intrinsics that use vXi1 masks.
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_gather_dpd_512 :
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_dps_512 :
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_qpd_512 :
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_qps_512 :
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+
+ def int_x86_avx512_mask_gather_dpq_512 :
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_dpi_512 :
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_qpq_512 :
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+ def int_x86_avx512_mask_gather_qpi_512 :
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div2_df :
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div2_di :
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div4_df :
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div4_di :
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div4_sf :
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div4_si :
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div8_sf :
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3div8_si :
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv2_df :
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv2_di :
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv4_df :
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv4_di :
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv4_sf :
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv4_si :
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv8_sf :
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_gather3siv8_si :
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
+ [IntrReadMem, IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatter_dpd_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+ llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_dps_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
+ llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_qpd_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+ llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_qps_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+ llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+
+ def int_x86_avx512_mask_scatter_dpq_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
+ llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_dpi_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_qpq_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i32_ty],
+ [IntrArgMemOnly]>;
+ def int_x86_avx512_mask_scatter_qpi_512 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i32_ty,
+ llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv2_df :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv2_di :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv4_df :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv4_di :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv4_sf :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv4_si :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv8_sf :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scatterdiv8_si :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv2_df :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv2_di :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv4_df :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv4_di :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv4_sf :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv4_si :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv8_sf :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+
+ def int_x86_avx512_mask_scattersiv8_si :
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
+ [IntrArgMemOnly]>;
+}
+
// AVX-512 conflict detection instruction
// Instructions that count the number of leading zero bits
let TargetPrefix = "x86" in {
@@ -4273,237 +4291,6 @@ let TargetPrefix = "x86" in {
llvm_i8_ty], [IntrNoMem]>;
}
-// VBMI2 Concat & Shift
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_vpshld_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshldq512">,
- Intrinsic<[llvm_v8i64_ty],
- [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshldq256">,
- Intrinsic<[llvm_v4i64_ty],
- [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshldq128">,
- Intrinsic<[llvm_v2i64_ty],
- [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vpshld_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshldd512">,
- Intrinsic<[llvm_v16i32_ty],
- [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshldd256">,
- Intrinsic<[llvm_v8i32_ty],
- [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshldd128">,
- Intrinsic<[llvm_v4i32_ty],
- [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vpshld_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshldw512">,
- Intrinsic<[llvm_v32i16_ty],
- [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshldw256">,
- Intrinsic<[llvm_v16i16_ty],
- [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshld_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshldw128">,
- Intrinsic<[llvm_v8i16_ty],
- [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vpshrd_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdq512">,
- Intrinsic<[llvm_v8i64_ty],
- [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdq256">,
- Intrinsic<[llvm_v4i64_ty],
- [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdq128">,
- Intrinsic<[llvm_v2i64_ty],
- [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vpshrd_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdd512">,
- Intrinsic<[llvm_v16i32_ty],
- [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdd256">,
- Intrinsic<[llvm_v8i32_ty],
- [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdd128">,
- Intrinsic<[llvm_v4i32_ty],
- [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vpshrd_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdw512">,
- Intrinsic<[llvm_v32i16_ty],
- [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdw256">,
- Intrinsic<[llvm_v16i16_ty],
- [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_vpshrd_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdw128">,
- Intrinsic<[llvm_v8i16_ty],
- [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshldv_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvw128_maskz">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvw256_maskz">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvw512_maskz">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshldv_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvq128_maskz">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvq256_maskz">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvq512_maskz">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshldv_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshldvd128_maskz">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshldvd256_maskz">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshldv_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshldv_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshldvd512_maskz">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshrdv_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_w_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw128_maskz">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_w_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw256_maskz">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_w_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvw512_maskz">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshrdv_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_q_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq128_maskz">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_q_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq256_maskz">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_q_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvq512_maskz">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpshrdv_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_d_128 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd128_maskz">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_d_256 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd256_maskz">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_vpshrdv_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_maskz_vpshrdv_d_512 :
- GCCBuiltin<"__builtin_ia32_vpshrdvd512_maskz">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
-}
-
// truncate
let TargetPrefix = "x86" in {
def int_x86_avx512_mask_pmov_qb_128 :
diff --git a/contrib/llvm/include/llvm/IR/LLVMContext.h b/contrib/llvm/include/llvm/IR/LLVMContext.h
index ebd445553167..bd7097b39a3e 100644
--- a/contrib/llvm/include/llvm/IR/LLVMContext.h
+++ b/contrib/llvm/include/llvm/IR/LLVMContext.h
@@ -102,6 +102,7 @@ public:
MD_associated = 22, // "associated"
MD_callees = 23, // "callees"
MD_irr_loop = 24, // "irr_loop"
+ MD_access_group = 25, // "llvm.access.group"
};
/// Known operand bundle tag IDs, which always have the same value. All
diff --git a/contrib/llvm/include/llvm/IR/LegacyPassManager.h b/contrib/llvm/include/llvm/IR/LegacyPassManager.h
index 9a376a151505..5257a0eed488 100644
--- a/contrib/llvm/include/llvm/IR/LegacyPassManager.h
+++ b/contrib/llvm/include/llvm/IR/LegacyPassManager.h
@@ -98,9 +98,6 @@ private:
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_STDCXX_CONVERSION_FUNCTIONS(legacy::PassManagerBase, LLVMPassManagerRef)
-/// If -time-passes has been specified, report the timings immediately and then
-/// reset the timers to zero.
-void reportAndResetTimings();
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/IR/LegacyPassManagers.h b/contrib/llvm/include/llvm/IR/LegacyPassManagers.h
index f6752f2817ba..51a2eb2a146d 100644
--- a/contrib/llvm/include/llvm/IR/LegacyPassManagers.h
+++ b/contrib/llvm/include/llvm/IR/LegacyPassManagers.h
@@ -406,11 +406,23 @@ public:
/// Set the initial size of the module if the user has specified that they
/// want remarks for size.
/// Returns 0 if the remark was not requested.
- unsigned initSizeRemarkInfo(Module &M);
+ unsigned initSizeRemarkInfo(
+ Module &M,
+ StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount);
/// Emit a remark signifying that the number of IR instructions in the module
/// changed.
- void emitInstrCountChangedRemark(Pass *P, Module &M, unsigned CountBefore);
+ /// \p F is optionally passed by passes which run on Functions, and thus
+ /// always know whether or not a non-empty function is available.
+ ///
+ /// \p FunctionToInstrCount maps the name of a \p Function to a pair. The
+ /// first member of the pair is the IR count of the \p Function before running
+ /// \p P, and the second member is the IR count of the \p Function after
+ /// running \p P.
+ void emitInstrCountChangedRemark(
+ Pass *P, Module &M, int64_t Delta, unsigned CountBefore,
+ StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount,
+ Function *F = nullptr);
protected:
// Top level manager.
@@ -508,7 +520,6 @@ public:
}
};
-Timer *getPassTimer(Pass *);
}
#endif
diff --git a/contrib/llvm/include/llvm/IR/Metadata.h b/contrib/llvm/include/llvm/IR/Metadata.h
index 9ac97f4224ac..be82c4efc115 100644
--- a/contrib/llvm/include/llvm/IR/Metadata.h
+++ b/contrib/llvm/include/llvm/IR/Metadata.h
@@ -66,9 +66,11 @@ protected:
enum StorageType { Uniqued, Distinct, Temporary };
/// Storage flag for non-uniqued, otherwise unowned, metadata.
- unsigned char Storage;
+ unsigned char Storage : 7;
// TODO: expose remaining bits to subclasses.
+ unsigned char ImplicitCode : 1;
+
unsigned short SubclassData16 = 0;
unsigned SubclassData32 = 0;
@@ -80,7 +82,7 @@ public:
protected:
Metadata(unsigned ID, StorageType Storage)
- : SubclassID(ID), Storage(Storage) {
+ : SubclassID(ID), Storage(Storage), ImplicitCode(false) {
static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
}
@@ -1316,10 +1318,11 @@ public:
//===----------------------------------------------------------------------===//
/// A tuple of MDNodes.
///
-/// Despite its name, a NamedMDNode isn't itself an MDNode. NamedMDNodes belong
-/// to modules, have names, and contain lists of MDNodes.
+/// Despite its name, a NamedMDNode isn't itself an MDNode.
+///
+/// NamedMDNodes are named module-level entities that contain lists of MDNodes.
///
-/// TODO: Inherit from Metadata.
+/// It is illegal for a NamedMDNode to appear as an operand of an MDNode.
class NamedMDNode : public ilist_node<NamedMDNode> {
friend class LLVMContextImpl;
friend class Module;
@@ -1420,6 +1423,9 @@ public:
}
};
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(NamedMDNode, LLVMNamedMDNodeRef)
+
} // end namespace llvm
#endif // LLVM_IR_METADATA_H
diff --git a/contrib/llvm/include/llvm/IR/Module.h b/contrib/llvm/include/llvm/IR/Module.h
index a405f7df3efe..9ef35f1f73cd 100644
--- a/contrib/llvm/include/llvm/IR/Module.h
+++ b/contrib/llvm/include/llvm/IR/Module.h
@@ -16,6 +16,7 @@
#define LLVM_IR_MODULE_H
#include "llvm-c/Types.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -48,6 +49,7 @@ class MemoryBuffer;
class RandomNumberGenerator;
template <class PtrType> class SmallPtrSetImpl;
class StructType;
+class VersionTuple;
/// A Module instance is used to store all the information related to an
/// LLVM module. Modules are the top level container of all other LLVM
@@ -365,6 +367,11 @@ public:
return getOrInsertFunction(Name, AttributeList{}, RetTy, Args...);
}
+ // Avoid an incorrect ordering that'd otherwise compile incorrectly.
+ template <typename... ArgsTy>
+ Constant *getOrInsertFunction(StringRef Name, AttributeList AttributeList,
+ FunctionType *Invalid, ArgsTy... Args) = delete;
+
/// Look up the specified function in the module symbol table. If it does not
/// exist, return null.
Function *getFunction(StringRef Name) const;
@@ -401,11 +408,15 @@ public:
}
/// Look up the specified global in the module symbol table.
- /// 1. If it does not exist, add a declaration of the global and return it.
- /// 2. Else, the global exists but has the wrong type: return the function
- /// with a constantexpr cast to the right type.
- /// 3. Finally, if the existing global is the correct declaration, return
- /// the existing global.
+ /// If it does not exist, invoke a callback to create a declaration of the
+ /// global and return it. The global is constantexpr casted to the expected
+ /// type if necessary.
+ Constant *
+ getOrInsertGlobal(StringRef Name, Type *Ty,
+ function_ref<GlobalVariable *()> CreateGlobalCallback);
+
+ /// Look up the specified global in the module symbol table. If required, this
+ /// overload constructs the global variable using its constructor's defaults.
Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
/// @}
@@ -840,6 +851,17 @@ public:
void setPIELevel(PIELevel::Level PL);
/// @}
+ /// @}
+ /// @name Utility function for querying and setting code model
+ /// @{
+
+ /// Returns the code model (tiny, small, kernel, medium or large model)
+ Optional<CodeModel::Model> getCodeModel() const;
+
+ /// Set the code model (tiny, small, kernel, medium or large)
+ void setCodeModel(CodeModel::Model CL);
+ /// @}
+
/// @name Utility functions for querying and setting PGO summary
/// @{
@@ -856,6 +878,17 @@ public:
/// Set that PLT should be avoid for RTLib calls.
void setRtLibUseGOT();
+ /// @name Utility functions for querying and setting the build SDK version
+ /// @{
+
+ /// Attach a build SDK version metadata to this module.
+ void setSDKVersion(const VersionTuple &V);
+
+ /// Get the build SDK version metadata.
+ ///
+ /// An empty version is returned if no such metadata is attached.
+ VersionTuple getSDKVersion() const;
+ /// @}
/// Take ownership of the given memory buffer.
void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);
diff --git a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
index fdf3d4b5f1ce..a1acee494475 100644
--- a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Allocator.h"
@@ -99,6 +100,22 @@ struct CalleeInfo {
}
};
+inline const char *getHotnessName(CalleeInfo::HotnessType HT) {
+ switch (HT) {
+ case CalleeInfo::HotnessType::Unknown:
+ return "unknown";
+ case CalleeInfo::HotnessType::Cold:
+ return "cold";
+ case CalleeInfo::HotnessType::None:
+ return "none";
+ case CalleeInfo::HotnessType::Hot:
+ return "hot";
+ case CalleeInfo::HotnessType::Critical:
+ return "critical";
+ }
+ llvm_unreachable("invalid hotness");
+}
+
class GlobalValueSummary;
using GlobalValueSummaryList = std::vector<std::unique_ptr<GlobalValueSummary>>;
@@ -146,13 +163,13 @@ using GlobalValueSummaryMapTy =
/// Struct that holds a reference to a particular GUID in a global value
/// summary.
struct ValueInfo {
- PointerIntPair<const GlobalValueSummaryMapTy::value_type *, 1, bool>
- RefAndFlag;
+ PointerIntPair<const GlobalValueSummaryMapTy::value_type *, 2, int>
+ RefAndFlags;
ValueInfo() = default;
ValueInfo(bool HaveGVs, const GlobalValueSummaryMapTy::value_type *R) {
- RefAndFlag.setPointer(R);
- RefAndFlag.setInt(HaveGVs);
+ RefAndFlags.setPointer(R);
+ RefAndFlags.setInt(HaveGVs);
}
operator bool() const { return getRef(); }
@@ -172,10 +189,12 @@ struct ValueInfo {
: getRef()->second.U.Name;
}
- bool haveGVs() const { return RefAndFlag.getInt(); }
+ bool haveGVs() const { return RefAndFlags.getInt() & 0x1; }
+ bool isReadOnly() const { return RefAndFlags.getInt() & 0x2; }
+ void setReadOnly() { RefAndFlags.setInt(RefAndFlags.getInt() | 0x2); }
const GlobalValueSummaryMapTy::value_type *getRef() const {
- return RefAndFlag.getPointer();
+ return RefAndFlags.getPointer();
}
bool isDSOLocal() const;
@@ -391,6 +410,7 @@ public:
return const_cast<GlobalValueSummary &>(
static_cast<const AliasSummary *>(this)->getAliasee());
}
+ bool hasAliaseeGUID() const { return AliaseeGUID != 0; }
const GlobalValue::GUID &getAliaseeGUID() const {
assert(AliaseeGUID && "Unexpected missing aliasee GUID");
return AliaseeGUID;
@@ -460,13 +480,17 @@ public:
TypeCheckedLoadConstVCalls;
};
- /// Function attribute flags. Used to track if a function accesses memory,
- /// recurses or aliases.
+ /// Flags specific to function summaries.
struct FFlags {
+ // Function attribute flags. Used to track if a function accesses memory,
+ // recurses or aliases.
unsigned ReadNone : 1;
unsigned ReadOnly : 1;
unsigned NoRecurse : 1;
unsigned ReturnDoesNotAlias : 1;
+
+ // Indicate if the global value cannot be inlined.
+ unsigned NoInline : 1;
};
/// Create an empty FunctionSummary (with specified call edges).
@@ -477,8 +501,9 @@ public:
FunctionSummary::GVFlags(
GlobalValue::LinkageTypes::AvailableExternallyLinkage,
/*NotEligibleToImport=*/true, /*Live=*/true, /*IsLocal=*/false),
- 0, FunctionSummary::FFlags{}, std::vector<ValueInfo>(),
- std::move(Edges), std::vector<GlobalValue::GUID>(),
+ /*InsCount=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0,
+ std::vector<ValueInfo>(), std::move(Edges),
+ std::vector<GlobalValue::GUID>(),
std::vector<FunctionSummary::VFuncId>(),
std::vector<FunctionSummary::VFuncId>(),
std::vector<FunctionSummary::ConstVCall>(),
@@ -493,10 +518,14 @@ private:
/// during the initial compile step when the summary index is first built.
unsigned InstCount;
- /// Function attribute flags. Used to track if a function accesses memory,
- /// recurses or aliases.
+ /// Function summary specific flags.
FFlags FunFlags;
+ /// The synthesized entry count of the function.
+ /// This is only populated during ThinLink phase and remains unused while
+ /// generating per-module summaries.
+ uint64_t EntryCount = 0;
+
/// List of <CalleeValueInfo, CalleeInfo> call edge pairs from this function.
std::vector<EdgeTy> CallGraphEdgeList;
@@ -504,14 +533,15 @@ private:
public:
FunctionSummary(GVFlags Flags, unsigned NumInsts, FFlags FunFlags,
- std::vector<ValueInfo> Refs, std::vector<EdgeTy> CGEdges,
+ uint64_t EntryCount, std::vector<ValueInfo> Refs,
+ std::vector<EdgeTy> CGEdges,
std::vector<GlobalValue::GUID> TypeTests,
std::vector<VFuncId> TypeTestAssumeVCalls,
std::vector<VFuncId> TypeCheckedLoadVCalls,
std::vector<ConstVCall> TypeTestAssumeConstVCalls,
std::vector<ConstVCall> TypeCheckedLoadConstVCalls)
: GlobalValueSummary(FunctionKind, Flags, std::move(Refs)),
- InstCount(NumInsts), FunFlags(FunFlags),
+ InstCount(NumInsts), FunFlags(FunFlags), EntryCount(EntryCount),
CallGraphEdgeList(std::move(CGEdges)) {
if (!TypeTests.empty() || !TypeTestAssumeVCalls.empty() ||
!TypeCheckedLoadVCalls.empty() || !TypeTestAssumeConstVCalls.empty() ||
@@ -522,18 +552,26 @@ public:
std::move(TypeTestAssumeConstVCalls),
std::move(TypeCheckedLoadConstVCalls)});
}
+ /// Get the number of immutable refs in RefEdgeList.
+ unsigned immutableRefCount() const;
/// Check if this is a function summary.
static bool classof(const GlobalValueSummary *GVS) {
return GVS->getSummaryKind() == FunctionKind;
}
- /// Get function attribute flags.
+ /// Get function summary flags.
FFlags fflags() const { return FunFlags; }
/// Get the instruction count recorded for this function.
unsigned instCount() const { return InstCount; }
+ /// Get the synthetic entry count for this function.
+ uint64_t entryCount() const { return EntryCount; }
+
+ /// Set the synthetic entry count for this function.
+ void setEntryCount(uint64_t EC) { EntryCount = EC; }
+
/// Return the list of <CalleeValueInfo, CalleeInfo> pairs.
ArrayRef<EdgeTy> calls() const { return CallGraphEdgeList; }
@@ -631,19 +669,30 @@ template <> struct DenseMapInfo<FunctionSummary::ConstVCall> {
/// Global variable summary information to aid decisions and
/// implementation of importing.
///
-/// Currently this doesn't add anything to the base \p GlobalValueSummary,
-/// but is a placeholder as additional info may be added to the summary
-/// for variables.
+/// A global variable summary carries an extra flag, telling whether the
+/// variable is modified during the program run or not. This affects ThinLTO
+/// internalization.
class GlobalVarSummary : public GlobalValueSummary {
-
public:
- GlobalVarSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
- : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)) {}
+ struct GVarFlags {
+ GVarFlags(bool ReadOnly = false) : ReadOnly(ReadOnly) {}
+
+ unsigned ReadOnly : 1;
+ } VarFlags;
+
+ GlobalVarSummary(GVFlags Flags, GVarFlags VarFlags,
+ std::vector<ValueInfo> Refs)
+ : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)),
+ VarFlags(VarFlags) {}
/// Check if this is a global variable summary.
static bool classof(const GlobalValueSummary *GVS) {
return GVS->getSummaryKind() == GlobalVarKind;
}
+
+ GVarFlags varflags() const { return VarFlags; }
+ void setReadOnly(bool RO) { VarFlags.ReadOnly = RO; }
+ bool isReadOnly() const { return VarFlags.ReadOnly; }
};
struct TypeTestResolution {
@@ -737,6 +786,11 @@ using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>;
/// a particular module, and provide efficient access to their summary.
using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
+/// Map of a type GUID to type id string and summary (multimap used
+/// in case of GUID conflicts).
+using TypeIdSummaryMapTy =
+ std::multimap<GlobalValue::GUID, std::pair<std::string, TypeIdSummary>>;
+
/// Class to hold module path string table and global value map,
/// and encapsulate methods for operating on them.
class ModuleSummaryIndex {
@@ -748,9 +802,9 @@ private:
/// Holds strings for combined index, mapping to the corresponding module ID.
ModulePathStringTableTy ModulePathStringTable;
- /// Mapping from type identifiers to summary information for that type
- /// identifier.
- std::map<std::string, TypeIdSummary> TypeIdMap;
+ /// Mapping from type identifier GUIDs to type identifier and its summary
+ /// information.
+ TypeIdSummaryMapTy TypeIdMap;
/// Mapping from original ID to GUID. If original ID can map to multiple
/// GUIDs, it will be mapped to 0.
@@ -761,6 +815,9 @@ private:
/// considered live.
bool WithGlobalValueDeadStripping = false;
+ /// Indicates that summary-based synthetic entry count propagation has run.
+ bool HasSyntheticEntryCounts = false;
+
/// Indicates that distributed backend should skip compilation of the
/// module. Flag is suppose to be set by distributed ThinLTO indexing
/// when it detected that the module is not needed during the final
@@ -774,6 +831,13 @@ private:
/// union.
bool HaveGVs;
+ // True if the index was created for a module compiled with -fsplit-lto-unit.
+ bool EnableSplitLTOUnit;
+
+ // True if some of the modules were compiled with -fsplit-lto-unit and
+ // some were not. Set when the combined index is created during the thin link.
+ bool PartiallySplitLTOUnits = false;
+
std::set<std::string> CfiFunctionDefs;
std::set<std::string> CfiFunctionDecls;
@@ -793,7 +857,9 @@ private:
public:
// See HaveGVs variable comment.
- ModuleSummaryIndex(bool HaveGVs) : HaveGVs(HaveGVs), Saver(Alloc) {}
+ ModuleSummaryIndex(bool HaveGVs, bool EnableSplitLTOUnit = false)
+ : HaveGVs(HaveGVs), EnableSplitLTOUnit(EnableSplitLTOUnit), Saver(Alloc) {
+ }
bool haveGVs() const { return HaveGVs; }
@@ -873,6 +939,9 @@ public:
WithGlobalValueDeadStripping = true;
}
+ bool hasSyntheticEntryCounts() const { return HasSyntheticEntryCounts; }
+ void setHasSyntheticEntryCounts() { HasSyntheticEntryCounts = true; }
+
bool skipModuleByDistributedBackend() const {
return SkipModuleByDistributedBackend;
}
@@ -880,6 +949,12 @@ public:
SkipModuleByDistributedBackend = true;
}
+ bool enableSplitLTOUnit() const { return EnableSplitLTOUnit; }
+ void setEnableSplitLTOUnit() { EnableSplitLTOUnit = true; }
+
+ bool partiallySplitLTOUnits() const { return PartiallySplitLTOUnits; }
+ void setPartiallySplitLTOUnits() { PartiallySplitLTOUnits = true; }
+
bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
return !WithGlobalValueDeadStripping || GVS->isLive();
}
@@ -905,7 +980,7 @@ public:
// Save a string in the Index. Use before passing Name to
// getOrInsertValueInfo when the string isn't owned elsewhere (e.g. on the
// module's Strtab).
- StringRef saveString(std::string String) { return Saver.save(String); }
+ StringRef saveString(StringRef String) { return Saver.save(String); }
/// Return a ValueInfo for \p GUID setting value \p Name.
ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID, StringRef Name) {
@@ -1063,23 +1138,29 @@ public:
return ModulePathStringTable.count(M.getModuleIdentifier());
}
- const std::map<std::string, TypeIdSummary> &typeIds() const {
- return TypeIdMap;
- }
+ const TypeIdSummaryMapTy &typeIds() const { return TypeIdMap; }
- /// This accessor should only be used when exporting because it can mutate the
- /// map.
+ /// Return an existing or new TypeIdSummary entry for \p TypeId.
+ /// This accessor can mutate the map and therefore should not be used in
+ /// the ThinLTO backends.
TypeIdSummary &getOrInsertTypeIdSummary(StringRef TypeId) {
- return TypeIdMap[TypeId];
+ auto TidIter = TypeIdMap.equal_range(GlobalValue::getGUID(TypeId));
+ for (auto It = TidIter.first; It != TidIter.second; ++It)
+ if (It->second.first == TypeId)
+ return It->second.second;
+ auto It = TypeIdMap.insert(
+ {GlobalValue::getGUID(TypeId), {TypeId, TypeIdSummary()}});
+ return It->second.second;
}
/// This returns either a pointer to the type id summary (if present in the
/// summary map) or null (if not present). This may be used when importing.
const TypeIdSummary *getTypeIdSummary(StringRef TypeId) const {
- auto I = TypeIdMap.find(TypeId);
- if (I == TypeIdMap.end())
- return nullptr;
- return &I->second;
+ auto TidIter = TypeIdMap.equal_range(GlobalValue::getGUID(TypeId));
+ for (auto It = TidIter.first; It != TidIter.second; ++It)
+ if (It->second.first == TypeId)
+ return &It->second.second;
+ return nullptr;
}
/// Collect for the given module the list of functions it defines
@@ -1103,11 +1184,15 @@ public:
/// Print out strongly connected components for debugging.
void dumpSCCs(raw_ostream &OS);
+
+ /// Analyze index and detect unmodified globals.
+ void propagateConstants(const DenseSet<GlobalValue::GUID> &PreservedSymbols);
};
/// GraphTraits definition to build SCC for the index
template <> struct GraphTraits<ValueInfo> {
typedef ValueInfo NodeRef;
+ using EdgeRef = FunctionSummary::EdgeTy &;
static NodeRef valueInfoFromEdge(FunctionSummary::EdgeTy &P) {
return P.first;
@@ -1116,6 +1201,8 @@ template <> struct GraphTraits<ValueInfo> {
mapped_iterator<std::vector<FunctionSummary::EdgeTy>::iterator,
decltype(&valueInfoFromEdge)>;
+ using ChildEdgeIteratorType = std::vector<FunctionSummary::EdgeTy>::iterator;
+
static NodeRef getEntryNode(ValueInfo V) { return V; }
static ChildIteratorType child_begin(NodeRef N) {
@@ -1137,6 +1224,26 @@ template <> struct GraphTraits<ValueInfo> {
cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
return ChildIteratorType(F->CallGraphEdgeList.end(), &valueInfoFromEdge);
}
+
+ static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
+ if (!N.getSummaryList().size()) // handle external function
+ return FunctionSummary::ExternalNode.CallGraphEdgeList.begin();
+
+ FunctionSummary *F =
+ cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
+ return F->CallGraphEdgeList.begin();
+ }
+
+ static ChildEdgeIteratorType child_edge_end(NodeRef N) {
+ if (!N.getSummaryList().size()) // handle external function
+ return FunctionSummary::ExternalNode.CallGraphEdgeList.end();
+
+ FunctionSummary *F =
+ cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
+ return F->CallGraphEdgeList.end();
+ }
+
+ static NodeRef edge_dest(EdgeRef E) { return E.first; }
};
template <>
@@ -1152,6 +1259,14 @@ struct GraphTraits<ModuleSummaryIndex *> : public GraphTraits<ValueInfo> {
}
};
+static inline bool canImportGlobalVar(GlobalValueSummary *S) {
+ assert(isa<GlobalVarSummary>(S->getBaseObject()));
+
+ // We don't import GVs with references, because doing so can result
+ // in promotion of local variables in the source module.
+ return !GlobalValue::isInterposableLinkage(S->linkage()) &&
+ !S->notEligibleToImport() && S->refs().empty();
+}
} // end namespace llvm
#endif // LLVM_IR_MODULESUMMARYINDEX_H
diff --git a/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h b/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
index 1b339ab32cf1..a88ee26b51c3 100644
--- a/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -195,7 +195,6 @@ template <> struct MappingTraits<FunctionSummaryYaml> {
} // End yaml namespace
} // End llvm namespace
-LLVM_YAML_IS_STRING_MAP(TypeIdSummary)
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)
namespace llvm {
@@ -225,7 +224,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
GlobalValueSummary::GVFlags(
static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal),
- 0, FunctionSummary::FFlags{}, Refs,
+ /*NumInsts=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0, Refs,
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
std::move(FSum.TypeCheckedLoadVCalls),
@@ -258,6 +257,18 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
}
};
+template <> struct CustomMappingTraits<TypeIdSummaryMapTy> {
+ static void inputOne(IO &io, StringRef Key, TypeIdSummaryMapTy &V) {
+ TypeIdSummary TId;
+ io.mapRequired(Key.str().c_str(), TId);
+ V.insert({GlobalValue::getGUID(Key), {Key, TId}});
+ }
+ static void output(IO &io, TypeIdSummaryMapTy &V) {
+ for (auto TidIter = V.begin(); TidIter != V.end(); TidIter++)
+ io.mapRequired(TidIter->second.first.c_str(), TidIter->second.second);
+ }
+};
+
template <> struct MappingTraits<ModuleSummaryIndex> {
static void mapping(IO &io, ModuleSummaryIndex& index) {
io.mapOptional("GlobalValueMap", index.GlobalValueMap);
diff --git a/contrib/llvm/include/llvm/IR/Operator.h b/contrib/llvm/include/llvm/IR/Operator.h
index 939cec7f4aa4..6b387bbcccb1 100644
--- a/contrib/llvm/include/llvm/IR/Operator.h
+++ b/contrib/llvm/include/llvm/IR/Operator.h
@@ -364,19 +364,26 @@ public:
/// precision.
float getFPAccuracy() const;
- static bool classof(const Instruction *I) {
- return I->getType()->isFPOrFPVectorTy() ||
- I->getOpcode() == Instruction::FCmp;
- }
-
- static bool classof(const ConstantExpr *CE) {
- return CE->getType()->isFPOrFPVectorTy() ||
- CE->getOpcode() == Instruction::FCmp;
- }
-
static bool classof(const Value *V) {
- return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
- (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+ unsigned Opcode;
+ if (auto *I = dyn_cast<Instruction>(V))
+ Opcode = I->getOpcode();
+ else if (auto *CE = dyn_cast<ConstantExpr>(V))
+ Opcode = CE->getOpcode();
+ else
+ return false;
+
+ switch (Opcode) {
+ case Instruction::FCmp:
+ return true;
+ // non math FP Operators (no FMF)
+ case Instruction::ExtractElement:
+ case Instruction::ShuffleVector:
+ case Instruction::InsertElement:
+ return false;
+ default:
+ return V->getType()->isFPOrFPVectorTy();
+ }
}
};
diff --git a/contrib/llvm/include/llvm/IR/PassInstrumentation.h b/contrib/llvm/include/llvm/IR/PassInstrumentation.h
new file mode 100644
index 000000000000..08dac1c4a274
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PassInstrumentation.h
@@ -0,0 +1,207 @@
+//===- llvm/IR/PassInstrumentation.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the Pass Instrumentation classes that provide
+/// instrumentation points into the pass execution by PassManager.
+///
+/// There are two main classes:
+/// - PassInstrumentation provides a set of instrumentation points for
+/// pass managers to call on.
+///
+/// - PassInstrumentationCallbacks registers callbacks and provides access
+/// to them for PassInstrumentation.
+///
+/// PassInstrumentation object is being used as a result of
+/// PassInstrumentationAnalysis (so it is intended to be easily copyable).
+///
+/// Intended scheme of use for Pass Instrumentation is as follows:
+/// - register instrumentation callbacks in PassInstrumentationCallbacks
+/// instance. PassBuilder provides helper for that.
+///
+/// - register PassInstrumentationAnalysis with all the PassManagers.
+/// PassBuilder handles that automatically when registering analyses.
+///
+/// - Pass Manager requests PassInstrumentationAnalysis from analysis manager
+/// and gets PassInstrumentation as its result.
+///
+/// - Pass Manager invokes PassInstrumentation entry points appropriately,
+/// passing StringRef identification ("name") of the pass currently being
+/// executed and IRUnit it works on. There can be different schemes of
+/// providing names in future, currently it is just a name() of the pass.
+///
+/// - PassInstrumentation wraps address of IRUnit into llvm::Any and passes
+/// control to all the registered callbacks. Note that we specifically wrap
+/// 'const IRUnitT*' so as to avoid any accidental changes to IR in
+/// instrumenting callbacks.
+///
+/// - Some instrumentation points (BeforePass) allow to control execution
+/// of a pass. For those callbacks returning false means pass will not be
+/// executed.
+///
+/// TODO: currently there is no way for a pass to opt-out of execution control
+/// (e.g. become unskippable). PassManager is the only entity that determines
+/// how pass instrumentation affects pass execution.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSINSTRUMENTATION_H
+#define LLVM_IR_PASSINSTRUMENTATION_H
+
+#include "llvm/ADT/Any.h"
+#include "llvm/ADT/FunctionExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/TypeName.h"
+#include <type_traits>
+
+namespace llvm {
+
+class PreservedAnalyses;
+
+/// This class manages callbacks registration, as well as provides a way for
+/// PassInstrumentation to pass control to the registered callbacks.
+class PassInstrumentationCallbacks {
+public:
+ // Before/After callbacks accept IRUnits whenever appropriate, so they need
+ // to take them as constant pointers, wrapped with llvm::Any.
+ // For the case when IRUnit has been invalidated there is a different
+ // callback to use - AfterPassInvalidated.
+ // TODO: currently AfterPassInvalidated does not accept IRUnit, since passing
+ // an already invalidated IRUnit is unsafe. There are ways to handle
+ // invalidated IRUnits safely, and we might pursue that as soon as there is a
+ // useful instrumentation that needs it.
+ using BeforePassFunc = bool(StringRef, Any);
+ using AfterPassFunc = void(StringRef, Any);
+ using AfterPassInvalidatedFunc = void(StringRef);
+ using BeforeAnalysisFunc = void(StringRef, Any);
+ using AfterAnalysisFunc = void(StringRef, Any);
+
+public:
+ PassInstrumentationCallbacks() {}
+
+ /// Copying PassInstrumentationCallbacks is not intended.
+ PassInstrumentationCallbacks(const PassInstrumentationCallbacks &) = delete;
+ void operator=(const PassInstrumentationCallbacks &) = delete;
+
+ template <typename CallableT> void registerBeforePassCallback(CallableT C) {
+ BeforePassCallbacks.emplace_back(std::move(C));
+ }
+
+ template <typename CallableT> void registerAfterPassCallback(CallableT C) {
+ AfterPassCallbacks.emplace_back(std::move(C));
+ }
+
+ template <typename CallableT>
+ void registerAfterPassInvalidatedCallback(CallableT C) {
+ AfterPassInvalidatedCallbacks.emplace_back(std::move(C));
+ }
+
+ template <typename CallableT>
+ void registerBeforeAnalysisCallback(CallableT C) {
+ BeforeAnalysisCallbacks.emplace_back(std::move(C));
+ }
+
+ template <typename CallableT>
+ void registerAfterAnalysisCallback(CallableT C) {
+ AfterAnalysisCallbacks.emplace_back(std::move(C));
+ }
+
+private:
+ friend class PassInstrumentation;
+
+ SmallVector<llvm::unique_function<BeforePassFunc>, 4> BeforePassCallbacks;
+ SmallVector<llvm::unique_function<AfterPassFunc>, 4> AfterPassCallbacks;
+ SmallVector<llvm::unique_function<AfterPassInvalidatedFunc>, 4>
+ AfterPassInvalidatedCallbacks;
+ SmallVector<llvm::unique_function<BeforeAnalysisFunc>, 4>
+ BeforeAnalysisCallbacks;
+ SmallVector<llvm::unique_function<AfterAnalysisFunc>, 4>
+ AfterAnalysisCallbacks;
+};
+
+/// This class provides instrumentation entry points for the Pass Manager,
+/// doing calls to callbacks registered in PassInstrumentationCallbacks.
+class PassInstrumentation {
+ PassInstrumentationCallbacks *Callbacks;
+
+public:
+ /// Callbacks object is not owned by PassInstrumentation, its life-time
+ /// should at least match the life-time of corresponding
+ /// PassInstrumentationAnalysis (which usually is till the end of current
+ /// compilation).
+ PassInstrumentation(PassInstrumentationCallbacks *CB = nullptr)
+ : Callbacks(CB) {}
+
+ /// BeforePass instrumentation point - takes \p Pass instance to be executed
+ /// and constant reference to IR it operates on. \Returns true if pass is
+ /// allowed to be executed.
+ template <typename IRUnitT, typename PassT>
+ bool runBeforePass(const PassT &Pass, const IRUnitT &IR) const {
+ if (!Callbacks)
+ return true;
+
+ bool ShouldRun = true;
+ for (auto &C : Callbacks->BeforePassCallbacks)
+ ShouldRun &= C(Pass.name(), llvm::Any(&IR));
+ return ShouldRun;
+ }
+
+ /// AfterPass instrumentation point - takes \p Pass instance that has
+ /// just been executed and constant reference to \p IR it operates on.
+ /// \p IR is guaranteed to be valid at this point.
+ template <typename IRUnitT, typename PassT>
+ void runAfterPass(const PassT &Pass, const IRUnitT &IR) const {
+ if (Callbacks)
+ for (auto &C : Callbacks->AfterPassCallbacks)
+ C(Pass.name(), llvm::Any(&IR));
+ }
+
+ /// AfterPassInvalidated instrumentation point - takes \p Pass instance
+ /// that has just been executed. For use when IR has been invalidated
+ /// by \p Pass execution.
+ template <typename IRUnitT, typename PassT>
+ void runAfterPassInvalidated(const PassT &Pass) const {
+ if (Callbacks)
+ for (auto &C : Callbacks->AfterPassInvalidatedCallbacks)
+ C(Pass.name());
+ }
+
+ /// BeforeAnalysis instrumentation point - takes \p Analysis instance
+ /// to be executed and constant reference to IR it operates on.
+ template <typename IRUnitT, typename PassT>
+ void runBeforeAnalysis(const PassT &Analysis, const IRUnitT &IR) const {
+ if (Callbacks)
+ for (auto &C : Callbacks->BeforeAnalysisCallbacks)
+ C(Analysis.name(), llvm::Any(&IR));
+ }
+
+ /// AfterAnalysis instrumentation point - takes \p Analysis instance
+ /// that has just been executed and constant reference to IR it operated on.
+ template <typename IRUnitT, typename PassT>
+ void runAfterAnalysis(const PassT &Analysis, const IRUnitT &IR) const {
+ if (Callbacks)
+ for (auto &C : Callbacks->AfterAnalysisCallbacks)
+ C(Analysis.name(), llvm::Any(&IR));
+ }
+
+ /// Handle invalidation from the pass manager when PassInstrumentation
+ /// is used as the result of PassInstrumentationAnalysis.
+ ///
+ /// On attempt to invalidate just return false. There is nothing to become
+ /// invalid here.
+ template <typename IRUnitT, typename... ExtraArgsT>
+ bool invalidate(IRUnitT &, const class llvm::PreservedAnalyses &,
+ ExtraArgsT...) {
+ return false;
+ }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PassManager.h b/contrib/llvm/include/llvm/IR/PassManager.h
index a5d4aaf71c0e..738a2242eea0 100644
--- a/contrib/llvm/include/llvm/IR/PassManager.h
+++ b/contrib/llvm/include/llvm/IR/PassManager.h
@@ -44,6 +44,7 @@
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/PassManagerInternal.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TypeName.h"
@@ -402,6 +403,43 @@ struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
}
};
+namespace detail {
+
+/// Actual unpacker of extra arguments in getAnalysisResult,
+/// passes only those tuple arguments that are mentioned in index_sequence.
+template <typename PassT, typename IRUnitT, typename AnalysisManagerT,
+ typename... ArgTs, size_t... Ns>
+typename PassT::Result
+getAnalysisResultUnpackTuple(AnalysisManagerT &AM, IRUnitT &IR,
+ std::tuple<ArgTs...> Args,
+ llvm::index_sequence<Ns...>) {
+ (void)Args;
+ return AM.template getResult<PassT>(IR, std::get<Ns>(Args)...);
+}
+
+/// Helper for *partial* unpacking of extra arguments in getAnalysisResult.
+///
+/// Arguments passed in tuple come from PassManager, so they might have extra
+/// arguments after those AnalysisManager's ExtraArgTs ones that we need to
+/// pass to getResult.
+template <typename PassT, typename IRUnitT, typename... AnalysisArgTs,
+ typename... MainArgTs>
+typename PassT::Result
+getAnalysisResult(AnalysisManager<IRUnitT, AnalysisArgTs...> &AM, IRUnitT &IR,
+ std::tuple<MainArgTs...> Args) {
+ return (getAnalysisResultUnpackTuple<
+ PassT, IRUnitT>)(AM, IR, Args,
+ llvm::index_sequence_for<AnalysisArgTs...>{});
+}
+
+} // namespace detail
+
+// Forward declare the pass instrumentation analysis explicitly queried in
+// generic PassManager code.
+// FIXME: figure out a way to move PassInstrumentationAnalysis into its own
+// header.
+class PassInstrumentationAnalysis;
+
/// Manages a sequence of passes over a particular unit of IR.
///
/// A pass manager contains a sequence of passes to run over a particular unit
@@ -445,15 +483,34 @@ public:
ExtraArgTs... ExtraArgs) {
PreservedAnalyses PA = PreservedAnalyses::all();
+ // Request PassInstrumentation from analysis manager, will use it to run
+ // instrumenting callbacks for the passes later.
+ // Here we use std::tuple wrapper over getResult which helps to extract
+ // AnalysisManager's arguments out of the whole ExtraArgs set.
+ PassInstrumentation PI =
+ detail::getAnalysisResult<PassInstrumentationAnalysis>(
+ AM, IR, std::tuple<ExtraArgTs...>(ExtraArgs...));
+
if (DebugLogging)
dbgs() << "Starting " << getTypeName<IRUnitT>() << " pass manager run.\n";
for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+ auto *P = Passes[Idx].get();
if (DebugLogging)
- dbgs() << "Running pass: " << Passes[Idx]->name() << " on "
- << IR.getName() << "\n";
+ dbgs() << "Running pass: " << P->name() << " on " << IR.getName()
+ << "\n";
- PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM, ExtraArgs...);
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // pass, skip its execution completely if asked to (callback returns
+ // false).
+ if (!PI.runBeforePass<IRUnitT>(*P, IR))
+ continue;
+
+ PreservedAnalyses PassPA = P->run(IR, AM, ExtraArgs...);
+
+ // Call onto PassInstrumentation's AfterPass callbacks immediately after
+ // running the pass.
+ PI.runAfterPass<IRUnitT>(*P, IR);
// Update the analysis manager as each pass runs and potentially
// invalidates analyses.
@@ -510,6 +567,32 @@ extern template class PassManager<Function>;
/// Convenience typedef for a pass manager over functions.
using FunctionPassManager = PassManager<Function>;
+/// Pseudo-analysis pass that exposes the \c PassInstrumentation to pass
+/// managers. Goes before AnalysisManager definition to provide its
+/// internals (e.g PassInstrumentationAnalysis::ID) for use there if needed.
+/// FIXME: figure out a way to move PassInstrumentationAnalysis into its own
+/// header.
+class PassInstrumentationAnalysis
+ : public AnalysisInfoMixin<PassInstrumentationAnalysis> {
+ friend AnalysisInfoMixin<PassInstrumentationAnalysis>;
+ static AnalysisKey Key;
+
+ PassInstrumentationCallbacks *Callbacks;
+
+public:
+ /// PassInstrumentationCallbacks object is shared, owned by something else,
+ /// not this analysis.
+ PassInstrumentationAnalysis(PassInstrumentationCallbacks *Callbacks = nullptr)
+ : Callbacks(Callbacks) {}
+
+ using Result = PassInstrumentation;
+
+ template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+ Result run(IRUnitT &, AnalysisManagerT &, ExtraArgTs &&...) {
+ return PassInstrumentation(Callbacks);
+ }
+};
+
/// A container for analyses that lazily runs them and caches their
/// results.
///
@@ -860,9 +943,18 @@ private:
if (DebugLogging)
dbgs() << "Running analysis: " << P.name() << " on " << IR.getName()
<< "\n";
+
+ PassInstrumentation PI;
+ if (ID != PassInstrumentationAnalysis::ID()) {
+ PI = getResult<PassInstrumentationAnalysis>(IR, ExtraArgs...);
+ PI.runBeforeAnalysis(P, IR);
+ }
+
AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));
+ PI.runAfterAnalysis(P, IR);
+
// P.run may have inserted elements into AnalysisResults and invalidated
// RI.
RI = AnalysisResults.find({ID, &IR});
@@ -930,7 +1022,7 @@ using FunctionAnalysisManager = AnalysisManager<Function>;
/// analysis manager over an "inner" IR unit. The inner unit must be contained
/// in the outer unit.
///
-/// Fore example, InnerAnalysisManagerProxy<FunctionAnalysisManager, Module> is
+/// For example, InnerAnalysisManagerProxy<FunctionAnalysisManager, Module> is
/// an analysis over Modules (the "outer" unit) that provides access to a
/// Function analysis manager. The FunctionAnalysisManager is the "inner"
/// manager being proxied, and Functions are the "inner" unit. The inner/outer
@@ -1192,13 +1284,24 @@ public:
FunctionAnalysisManager &FAM =
AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+ // Request PassInstrumentation from analysis manager, will use it to run
+ // instrumenting callbacks for the passes later.
+ PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);
+
PreservedAnalyses PA = PreservedAnalyses::all();
for (Function &F : M) {
if (F.isDeclaration())
continue;
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // pass, skip its execution completely if asked to (callback returns
+ // false).
+ if (!PI.runBeforePass<Function>(Pass, F))
+ continue;
PreservedAnalyses PassPA = Pass.run(F, FAM);
+ PI.runAfterPass(Pass, F);
+
// We know that the function pass couldn't have invalidated any other
// function's analyses (that's the contract of a function pass), so
// directly handle the function analysis manager's invalidation here.
@@ -1302,10 +1405,26 @@ public:
RepeatedPass(int Count, PassT P) : Count(Count), P(std::move(P)) {}
template <typename IRUnitT, typename AnalysisManagerT, typename... Ts>
- PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, Ts &&... Args) {
+ PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, Ts &&... Args) {
+
+ // Request PassInstrumentation from analysis manager, will use it to run
+ // instrumenting callbacks for the passes later.
+ // Here we use std::tuple wrapper over getResult which helps to extract
+ // AnalysisManager's arguments out of the whole Args set.
+ PassInstrumentation PI =
+ detail::getAnalysisResult<PassInstrumentationAnalysis>(
+ AM, IR, std::tuple<Ts...>(Args...));
+
auto PA = PreservedAnalyses::all();
- for (int i = 0; i < Count; ++i)
- PA.intersect(P.run(Arg, AM, std::forward<Ts>(Args)...));
+ for (int i = 0; i < Count; ++i) {
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // pass, skip its execution completely if asked to (callback returns
+ // false).
+ if (!PI.runBeforePass<IRUnitT>(P, IR))
+ continue;
+ PA.intersect(P.run(IR, AM, std::forward<Ts>(Args)...));
+ PI.runAfterPass(P, IR);
+ }
return PA;
}
diff --git a/contrib/llvm/include/llvm/IR/PassManagerInternal.h b/contrib/llvm/include/llvm/IR/PassManagerInternal.h
index 16a3258b4121..5ad68be62742 100644
--- a/contrib/llvm/include/llvm/IR/PassManagerInternal.h
+++ b/contrib/llvm/include/llvm/IR/PassManagerInternal.h
@@ -48,7 +48,7 @@ struct PassConcept {
ExtraArgTs... ExtraArgs) = 0;
/// Polymorphic method to access the name of a pass.
- virtual StringRef name() = 0;
+ virtual StringRef name() const = 0;
};
/// A template wrapper used to implement the polymorphic API.
@@ -80,7 +80,7 @@ struct PassModel : PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...> {
return Pass.run(IR, AM, ExtraArgs...);
}
- StringRef name() override { return PassT::name(); }
+ StringRef name() const override { return PassT::name(); }
PassT Pass;
};
@@ -250,7 +250,7 @@ struct AnalysisPassConcept {
ExtraArgTs... ExtraArgs) = 0;
/// Polymorphic method to access the name of a pass.
- virtual StringRef name() = 0;
+ virtual StringRef name() const = 0;
};
/// Wrapper to model the analysis pass concept.
@@ -290,13 +290,14 @@ struct AnalysisPassModel : AnalysisPassConcept<IRUnitT, PreservedAnalysesT,
AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
ExtraArgTs... ExtraArgs) override {
- return llvm::make_unique<ResultModelT>(Pass.run(IR, AM, ExtraArgs...));
+ return llvm::make_unique<ResultModelT>(
+ Pass.run(IR, AM, std::forward<ExtraArgTs>(ExtraArgs)...));
}
/// The model delegates to a static \c PassT::name method.
///
/// The returned string ref must point to constant immutable data!
- StringRef name() override { return PassT::name(); }
+ StringRef name() const override { return PassT::name(); }
PassT Pass;
};
diff --git a/contrib/llvm/include/llvm/IR/PassTimingInfo.h b/contrib/llvm/include/llvm/IR/PassTimingInfo.h
new file mode 100644
index 000000000000..e9945f997f43
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PassTimingInfo.h
@@ -0,0 +1,108 @@
+//===- PassTimingInfo.h - pass execution timing -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header defines classes/functions to handle pass execution timing
+/// information with interfaces for both pass managers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSTIMINGINFO_H
+#define LLVM_IR_PASSTIMINGINFO_H
+
+#include "llvm/ADT/Any.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/TypeName.h"
+#include <memory>
+namespace llvm {
+
+class Pass;
+class PassInstrumentationCallbacks;
+
+/// If -time-passes has been specified, report the timings immediately and then
+/// reset the timers to zero.
+void reportAndResetTimings();
+
+/// Request the timer for this legacy-pass-manager's pass instance.
+Timer *getPassTimer(Pass *);
+
+/// If the user specifies the -time-passes argument on an LLVM tool command line
+/// then the value of this boolean will be true, otherwise false.
+/// This is the storage for the -time-passes option.
+extern bool TimePassesIsEnabled;
+
+/// This class implements -time-passes functionality for new pass manager.
+/// It provides the pass-instrumentation callbacks that measure the pass
+/// execution time. They collect timing info into individual timers as
+/// passes are being run. At the end of its life-time it prints the resulting
+/// timing report.
+class TimePassesHandler {
+ /// Value of this type is capable of uniquely identifying pass invocations.
+ /// It is a pair of string Pass-Identifier (which for now is common
+ /// to all the instances of a given pass) + sequential invocation counter.
+ using PassInvocationID = std::pair<StringRef, unsigned>;
+
+ /// A group of all pass-timing timers.
+ TimerGroup TG;
+
+ /// Map of timers for pass invocations
+ DenseMap<PassInvocationID, std::unique_ptr<Timer>> TimingData;
+
+ /// Map that counts invocations of passes, for use in UniqPassID construction.
+ StringMap<unsigned> PassIDCountMap;
+
+ /// Stack of currently active timers.
+ SmallVector<Timer *, 8> TimerStack;
+
+ bool Enabled;
+
+public:
+ TimePassesHandler(bool Enabled = TimePassesIsEnabled);
+
+ /// Destructor handles the print action if it has not been handled before.
+ ~TimePassesHandler() {
+ // First destroying the timers from TimingData, which deploys all their
+ // collected data into the TG time group member, which later prints itself
+ // when being destroyed.
+ TimingData.clear();
+ }
+
+ /// Prints out timing information and then resets the timers.
+ void print();
+
+ // We intend this to be unique per-compilation, thus no copies.
+ TimePassesHandler(const TimePassesHandler &) = delete;
+ void operator=(const TimePassesHandler &) = delete;
+
+ void registerCallbacks(PassInstrumentationCallbacks &PIC);
+
+private:
+ /// Dumps information for running/triggered timers, useful for debugging
+ LLVM_DUMP_METHOD void dump() const;
+
+ /// Returns the new timer for each new run of the pass.
+ Timer &getPassTimer(StringRef PassID);
+
+ /// Returns the incremented counter for the next invocation of \p PassID.
+ unsigned nextPassID(StringRef PassID) { return ++PassIDCountMap[PassID]; }
+
+ void startTimer(StringRef PassID);
+ void stopTimer(StringRef PassID);
+
+ // Implementation of pass instrumentation callbacks.
+ bool runBeforePass(StringRef PassID);
+ void runAfterPass(StringRef PassID);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PatternMatch.h b/contrib/llvm/include/llvm/IR/PatternMatch.h
index af0616cd8221..120fc253b908 100644
--- a/contrib/llvm/include/llvm/IR/PatternMatch.h
+++ b/contrib/llvm/include/llvm/IR/PatternMatch.h
@@ -31,7 +31,6 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
@@ -215,6 +214,7 @@ template <typename Predicate> struct cst_pred_ty : public Predicate {
// Non-splat vector constant: check each element for a match.
unsigned NumElts = V->getType()->getVectorNumElements();
assert(NumElts != 0 && "Constant vector with no elements?");
+ bool HasNonUndefElements = false;
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = C->getAggregateElement(i);
if (!Elt)
@@ -224,8 +224,9 @@ template <typename Predicate> struct cst_pred_ty : public Predicate {
auto *CI = dyn_cast<ConstantInt>(Elt);
if (!CI || !this->isValue(CI->getValue()))
return false;
+ HasNonUndefElements = true;
}
- return true;
+ return HasNonUndefElements;
}
}
return false;
@@ -272,6 +273,7 @@ template <typename Predicate> struct cstfp_pred_ty : public Predicate {
// Non-splat vector constant: check each element for a match.
unsigned NumElts = V->getType()->getVectorNumElements();
assert(NumElts != 0 && "Constant vector with no elements?");
+ bool HasNonUndefElements = false;
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = C->getAggregateElement(i);
if (!Elt)
@@ -281,8 +283,9 @@ template <typename Predicate> struct cstfp_pred_ty : public Predicate {
auto *CF = dyn_cast<ConstantFP>(Elt);
if (!CF || !this->isValue(CF->getValueAPF()))
return false;
+ HasNonUndefElements = true;
}
- return true;
+ return HasNonUndefElements;
}
}
return false;
@@ -659,11 +662,39 @@ inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
}
+template <typename Op_t> struct FNeg_match {
+ Op_t X;
+
+ FNeg_match(const Op_t &Op) : X(Op) {}
+ template <typename OpTy> bool match(OpTy *V) {
+ auto *FPMO = dyn_cast<FPMathOperator>(V);
+ if (!FPMO || FPMO->getOpcode() != Instruction::FSub)
+ return false;
+ if (FPMO->hasNoSignedZeros()) {
+ // With 'nsz', any zero goes.
+ if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0)))
+ return false;
+ } else {
+ // Without 'nsz', we need fsub -0.0, X exactly.
+ if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0)))
+ return false;
+ }
+ return X.match(FPMO->getOperand(1));
+ }
+};
+
/// Match 'fneg X' as 'fsub -0.0, X'.
+template <typename OpTy>
+inline FNeg_match<OpTy>
+m_FNeg(const OpTy &X) {
+ return FNeg_match<OpTy>(X);
+}
+
+/// Match 'fneg X' as 'fsub +-0.0, X'.
template <typename RHS>
-inline BinaryOp_match<cstfp_pred_ty<is_neg_zero_fp>, RHS, Instruction::FSub>
-m_FNeg(const RHS &X) {
- return m_FSub(m_NegZeroFP(), X);
+inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub>
+m_FNegNSZ(const RHS &X) {
+ return m_FSub(m_AnyZeroFP(), X);
}
template <typename LHS, typename RHS>
@@ -991,116 +1022,111 @@ m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
}
//===----------------------------------------------------------------------===//
-// Matchers for SelectInst classes
+// Matchers for instructions with a given opcode and number of operands.
//
-template <typename Cond_t, typename LHS_t, typename RHS_t>
-struct SelectClass_match {
- Cond_t C;
- LHS_t L;
- RHS_t R;
+/// Matches instructions with Opcode and one operand.
+template <typename T0, unsigned Opcode> struct OneOps_match {
+ T0 Op1;
- SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
- : C(Cond), L(LHS), R(RHS) {}
+ OneOps_match(const T0 &Op1) : Op1(Op1) {}
template <typename OpTy> bool match(OpTy *V) {
- if (auto *I = dyn_cast<SelectInst>(V))
- return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
- R.match(I->getOperand(2));
+ if (V->getValueID() == Value::InstructionVal + Opcode) {
+ auto *I = cast<Instruction>(V);
+ return Op1.match(I->getOperand(0));
+ }
return false;
}
};
+/// Matches instructions with Opcode and two operands.
+template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match {
+ T0 Op1;
+ T1 Op2;
+
+ TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opcode) {
+ auto *I = cast<Instruction>(V);
+ return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1));
+ }
+ return false;
+ }
+};
+
+/// Matches instructions with Opcode and three operands.
+template <typename T0, typename T1, typename T2, unsigned Opcode>
+struct ThreeOps_match {
+ T0 Op1;
+ T1 Op2;
+ T2 Op3;
+
+ ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3)
+ : Op1(Op1), Op2(Op2), Op3(Op3) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opcode) {
+ auto *I = cast<Instruction>(V);
+ return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
+ Op3.match(I->getOperand(2));
+ }
+ return false;
+ }
+};
+
+/// Matches SelectInst.
template <typename Cond, typename LHS, typename RHS>
-inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
- const RHS &R) {
- return SelectClass_match<Cond, LHS, RHS>(C, L, R);
+inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select>
+m_Select(const Cond &C, const LHS &L, const RHS &R) {
+ return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R);
}
/// This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template <int64_t L, int64_t R, typename Cond>
-inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
+inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>,
+ Instruction::Select>
m_SelectCst(const Cond &C) {
return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}
-//===----------------------------------------------------------------------===//
-// Matchers for InsertElementInst classes
-//
-
+/// Matches InsertElementInst.
template <typename Val_t, typename Elt_t, typename Idx_t>
-struct InsertElementClass_match {
- Val_t V;
- Elt_t E;
- Idx_t I;
-
- InsertElementClass_match(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
- : V(Val), E(Elt), I(Idx) {}
-
- template <typename OpTy> bool match(OpTy *VV) {
- if (auto *II = dyn_cast<InsertElementInst>(VV))
- return V.match(II->getOperand(0)) && E.match(II->getOperand(1)) &&
- I.match(II->getOperand(2));
- return false;
- }
-};
-
-template <typename Val_t, typename Elt_t, typename Idx_t>
-inline InsertElementClass_match<Val_t, Elt_t, Idx_t>
+inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
- return InsertElementClass_match<Val_t, Elt_t, Idx_t>(Val, Elt, Idx);
+ return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
+ Val, Elt, Idx);
}
-//===----------------------------------------------------------------------===//
-// Matchers for ExtractElementInst classes
-//
-
-template <typename Val_t, typename Idx_t> struct ExtractElementClass_match {
- Val_t V;
- Idx_t I;
-
- ExtractElementClass_match(const Val_t &Val, const Idx_t &Idx)
- : V(Val), I(Idx) {}
-
- template <typename OpTy> bool match(OpTy *VV) {
- if (auto *II = dyn_cast<ExtractElementInst>(VV))
- return V.match(II->getOperand(0)) && I.match(II->getOperand(1));
- return false;
- }
-};
-
+/// Matches ExtractElementInst.
template <typename Val_t, typename Idx_t>
-inline ExtractElementClass_match<Val_t, Idx_t>
+inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
m_ExtractElement(const Val_t &Val, const Idx_t &Idx) {
- return ExtractElementClass_match<Val_t, Idx_t>(Val, Idx);
+ return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
}
-//===----------------------------------------------------------------------===//
-// Matchers for ShuffleVectorInst classes
-//
-
+/// Matches ShuffleVectorInst.
template <typename V1_t, typename V2_t, typename Mask_t>
-struct ShuffleVectorClass_match {
- V1_t V1;
- V2_t V2;
- Mask_t M;
-
- ShuffleVectorClass_match(const V1_t &v1, const V2_t &v2, const Mask_t &m)
- : V1(v1), V2(v2), M(m) {}
+inline ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>
+m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) {
+ return ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>(v1, v2,
+ m);
+}
- template <typename OpTy> bool match(OpTy *V) {
- if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
- return V1.match(SI->getOperand(0)) && V2.match(SI->getOperand(1)) &&
- M.match(SI->getOperand(2));
- return false;
- }
-};
+/// Matches LoadInst.
+template <typename OpTy>
+inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) {
+ return OneOps_match<OpTy, Instruction::Load>(Op);
+}
-template <typename V1_t, typename V2_t, typename Mask_t>
-inline ShuffleVectorClass_match<V1_t, V2_t, Mask_t>
-m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) {
- return ShuffleVectorClass_match<V1_t, V2_t, Mask_t>(v1, v2, m);
+/// Matches StoreInst.
+template <typename ValueOpTy, typename PointerOpTy>
+inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>
+m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
+ return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp,
+ PointerOp);
}
//===----------------------------------------------------------------------===//
@@ -1181,54 +1207,6 @@ inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
}
//===----------------------------------------------------------------------===//
-// Matcher for LoadInst classes
-//
-
-template <typename Op_t> struct LoadClass_match {
- Op_t Op;
-
- LoadClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
-
- template <typename OpTy> bool match(OpTy *V) {
- if (auto *LI = dyn_cast<LoadInst>(V))
- return Op.match(LI->getPointerOperand());
- return false;
- }
-};
-
-/// Matches LoadInst.
-template <typename OpTy> inline LoadClass_match<OpTy> m_Load(const OpTy &Op) {
- return LoadClass_match<OpTy>(Op);
-}
-
-//===----------------------------------------------------------------------===//
-// Matcher for StoreInst classes
-//
-
-template <typename ValueOp_t, typename PointerOp_t> struct StoreClass_match {
- ValueOp_t ValueOp;
- PointerOp_t PointerOp;
-
- StoreClass_match(const ValueOp_t &ValueOpMatch,
- const PointerOp_t &PointerOpMatch) :
- ValueOp(ValueOpMatch), PointerOp(PointerOpMatch) {}
-
- template <typename OpTy> bool match(OpTy *V) {
- if (auto *LI = dyn_cast<StoreInst>(V))
- return ValueOp.match(LI->getValueOperand()) &&
- PointerOp.match(LI->getPointerOperand());
- return false;
- }
-};
-
-/// Matches StoreInst.
-template <typename ValueOpTy, typename PointerOpTy>
-inline StoreClass_match<ValueOpTy, PointerOpTy>
-m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
- return StoreClass_match<ValueOpTy, PointerOpTy>(ValueOp, PointerOp);
-}
-
-//===----------------------------------------------------------------------===//
// Matchers for control flow.
//
@@ -1507,8 +1485,10 @@ template <typename Opnd_t> struct Argument_match {
Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
template <typename OpTy> bool match(OpTy *V) {
- CallSite CS(V);
- return CS.isCall() && Val.match(CS.getArgument(OpI));
+ // FIXME: Should likely be switched to use `CallBase`.
+ if (const auto *CI = dyn_cast<CallInst>(V))
+ return Val.match(CI->getArgOperand(OpI));
+ return false;
}
};
diff --git a/contrib/llvm/include/llvm/IR/RuntimeLibcalls.def b/contrib/llvm/include/llvm/IR/RuntimeLibcalls.def
index 7ed90d959f01..89005120cdc1 100644
--- a/contrib/llvm/include/llvm/IR/RuntimeLibcalls.def
+++ b/contrib/llvm/include/llvm/IR/RuntimeLibcalls.def
@@ -83,6 +83,9 @@ HANDLE_LIBCALL(UDIVREM_I64, nullptr)
HANDLE_LIBCALL(UDIVREM_I128, nullptr)
HANDLE_LIBCALL(NEG_I32, "__negsi2")
HANDLE_LIBCALL(NEG_I64, "__negdi2")
+HANDLE_LIBCALL(CTLZ_I32, "__clzsi2")
+HANDLE_LIBCALL(CTLZ_I64, "__clzdi2")
+HANDLE_LIBCALL(CTLZ_I128, "__clzti2")
// Floating-point
HANDLE_LIBCALL(ADD_F32, "__addsf3")
@@ -125,6 +128,11 @@ HANDLE_LIBCALL(SQRT_F64, "sqrt")
HANDLE_LIBCALL(SQRT_F80, "sqrtl")
HANDLE_LIBCALL(SQRT_F128, "sqrtl")
HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
+HANDLE_LIBCALL(CBRT_F32, "cbrtf")
+HANDLE_LIBCALL(CBRT_F64, "cbrt")
+HANDLE_LIBCALL(CBRT_F80, "cbrtl")
+HANDLE_LIBCALL(CBRT_F128, "cbrtl")
+HANDLE_LIBCALL(CBRT_PPCF128, "cbrtl")
HANDLE_LIBCALL(LOG_F32, "logf")
HANDLE_LIBCALL(LOG_F64, "log")
HANDLE_LIBCALL(LOG_F80, "logl")
diff --git a/contrib/llvm/include/llvm/IR/TypeBuilder.h b/contrib/llvm/include/llvm/IR/TypeBuilder.h
deleted file mode 100644
index d2c6f00079da..000000000000
--- a/contrib/llvm/include/llvm/IR/TypeBuilder.h
+++ /dev/null
@@ -1,407 +0,0 @@
-//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TypeBuilder class, which is used as a convenient way to
-// create LLVM types with a consistent and simplified interface.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_TYPEBUILDER_H
-#define LLVM_IR_TYPEBUILDER_H
-
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/LLVMContext.h"
-#include <climits>
-
-namespace llvm {
-
-/// TypeBuilder - This provides a uniform API for looking up types
-/// known at compile time. To support cross-compilation, we define a
-/// series of tag types in the llvm::types namespace, like i<N>,
-/// ieee_float, ppc_fp128, etc. TypeBuilder<T, false> allows T to be
-/// any of these, a native C type (whose size may depend on the host
-/// compiler), or a pointer, function, or struct type built out of
-/// these. TypeBuilder<T, true> removes native C types from this set
-/// to guarantee that its result is suitable for cross-compilation.
-/// We define the primitive types, pointer types, and functions up to
-/// 5 arguments here, but to use this class with your own types,
-/// you'll need to specialize it. For example, say you want to call a
-/// function defined externally as:
-///
-/// \code{.cpp}
-///
-/// struct MyType {
-/// int32 a;
-/// int32 *b;
-/// void *array[1]; // Intended as a flexible array.
-/// };
-/// int8 AFunction(struct MyType *value);
-///
-/// \endcode
-///
-/// You'll want to use
-/// Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(), ...)
-/// to declare the function, but when you first try this, your compiler will
-/// complain that TypeBuilder<MyType, true>::get() doesn't exist. To fix this,
-/// write:
-///
-/// \code{.cpp}
-///
-/// namespace llvm {
-/// template<bool xcompile> class TypeBuilder<MyType, xcompile> {
-/// public:
-/// static StructType *get(LLVMContext &Context) {
-/// // If you cache this result, be sure to cache it separately
-/// // for each LLVMContext.
-/// return StructType::get(
-/// TypeBuilder<types::i<32>, xcompile>::get(Context),
-/// TypeBuilder<types::i<32>*, xcompile>::get(Context),
-/// TypeBuilder<types::i<8>*[], xcompile>::get(Context),
-/// nullptr);
-/// }
-///
-/// // You may find this a convenient place to put some constants
-/// // to help with getelementptr. They don't have any effect on
-/// // the operation of TypeBuilder.
-/// enum Fields {
-/// FIELD_A,
-/// FIELD_B,
-/// FIELD_ARRAY
-/// };
-/// }
-/// } // namespace llvm
-///
-/// \endcode
-///
-/// TypeBuilder cannot handle recursive types or types you only know at runtime.
-/// If you try to give it a recursive type, it will deadlock, infinitely
-/// recurse, or do something similarly undesirable.
-template<typename T, bool cross_compilable> class TypeBuilder {};
-
-// Types for use with cross-compilable TypeBuilders. These correspond
-// exactly with an LLVM-native type.
-namespace types {
-/// i<N> corresponds to the LLVM IntegerType with N bits.
-template<uint32_t num_bits> class i {};
-
-// The following classes represent the LLVM floating types.
-class ieee_float {};
-class ieee_double {};
-class x86_fp80 {};
-class fp128 {};
-class ppc_fp128 {};
-// X86 MMX.
-class x86_mmx {};
-} // namespace types
-
-// LLVM doesn't have const or volatile types.
-template<typename T, bool cross> class TypeBuilder<const T, cross>
- : public TypeBuilder<T, cross> {};
-template<typename T, bool cross> class TypeBuilder<volatile T, cross>
- : public TypeBuilder<T, cross> {};
-template<typename T, bool cross> class TypeBuilder<const volatile T, cross>
- : public TypeBuilder<T, cross> {};
-
-// Pointers
-template<typename T, bool cross> class TypeBuilder<T*, cross> {
-public:
- static PointerType *get(LLVMContext &Context) {
- return PointerType::getUnqual(TypeBuilder<T,cross>::get(Context));
- }
-};
-
-/// There is no support for references
-template<typename T, bool cross> class TypeBuilder<T&, cross> {};
-
-// Arrays
-template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> {
-public:
- static ArrayType *get(LLVMContext &Context) {
- return ArrayType::get(TypeBuilder<T, cross>::get(Context), N);
- }
-};
-/// LLVM uses an array of length 0 to represent an unknown-length array.
-template<typename T, bool cross> class TypeBuilder<T[], cross> {
-public:
- static ArrayType *get(LLVMContext &Context) {
- return ArrayType::get(TypeBuilder<T, cross>::get(Context), 0);
- }
-};
-
-// Define the C integral types only for TypeBuilder<T, false>.
-//
-// C integral types do not have a defined size. It would be nice to use the
-// stdint.h-defined typedefs that do have defined sizes, but we'd run into the
-// following problem:
-//
-// On an ILP32 machine, stdint.h might define:
-//
-// typedef int int32_t;
-// typedef long long int64_t;
-// typedef long size_t;
-//
-// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of
-// TypeBuilder<size_t> would fail. We couldn't define TypeBuilder<size_t> in
-// addition to the defined-size types because we'd get duplicate definitions on
-// platforms where stdint.h instead defines:
-//
-// typedef int int32_t;
-// typedef long long int64_t;
-// typedef int size_t;
-//
-// So we define all the primitive C types and nothing else.
-#define DEFINE_INTEGRAL_TYPEBUILDER(T) \
-template<> class TypeBuilder<T, false> { \
-public: \
- static IntegerType *get(LLVMContext &Context) { \
- return IntegerType::get(Context, sizeof(T) * CHAR_BIT); \
- } \
-}; \
-template<> class TypeBuilder<T, true> { \
- /* We provide a definition here so users don't accidentally */ \
- /* define these types to work. */ \
-}
-DEFINE_INTEGRAL_TYPEBUILDER(char);
-DEFINE_INTEGRAL_TYPEBUILDER(signed char);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned char);
-DEFINE_INTEGRAL_TYPEBUILDER(short);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned short);
-DEFINE_INTEGRAL_TYPEBUILDER(int);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned int);
-DEFINE_INTEGRAL_TYPEBUILDER(long);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned long);
-#ifdef _MSC_VER
-DEFINE_INTEGRAL_TYPEBUILDER(__int64);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64);
-#else /* _MSC_VER */
-DEFINE_INTEGRAL_TYPEBUILDER(long long);
-DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long);
-#endif /* _MSC_VER */
-#undef DEFINE_INTEGRAL_TYPEBUILDER
-
-template<uint32_t num_bits, bool cross>
-class TypeBuilder<types::i<num_bits>, cross> {
-public:
- static IntegerType *get(LLVMContext &C) {
- return IntegerType::get(C, num_bits);
- }
-};
-
-template<> class TypeBuilder<float, false> {
-public:
- static Type *get(LLVMContext& C) {
- return Type::getFloatTy(C);
- }
-};
-template<> class TypeBuilder<float, true> {};
-
-template<> class TypeBuilder<double, false> {
-public:
- static Type *get(LLVMContext& C) {
- return Type::getDoubleTy(C);
- }
-};
-template<> class TypeBuilder<double, true> {};
-
-template<bool cross> class TypeBuilder<types::ieee_float, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getFloatTy(C); }
-};
-template<bool cross> class TypeBuilder<types::ieee_double, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getDoubleTy(C); }
-};
-template<bool cross> class TypeBuilder<types::x86_fp80, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getX86_FP80Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::fp128, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getFP128Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
-};
-template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
-public:
- static Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
-};
-
-template<bool cross> class TypeBuilder<void, cross> {
-public:
- static Type *get(LLVMContext &C) {
- return Type::getVoidTy(C);
- }
-};
-
-/// void* is disallowed in LLVM types, but it occurs often enough in C code that
-/// we special case it.
-template<> class TypeBuilder<void*, false>
- : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<const void*, false>
- : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<volatile void*, false>
- : public TypeBuilder<types::i<8>*, false> {};
-template<> class TypeBuilder<const volatile void*, false>
- : public TypeBuilder<types::i<8>*, false> {};
-
-template<typename R, bool cross> class TypeBuilder<R(), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- return FunctionType::get(TypeBuilder<R, cross>::get(Context), false);
- }
-};
-template<typename R, typename A1, bool cross> class TypeBuilder<R(A1), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, false);
- }
-};
-template<typename R, typename A1, typename A2, bool cross>
-class TypeBuilder<R(A1, A2), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, false);
- }
-};
-template<typename R, typename A1, typename A2, typename A3, bool cross>
-class TypeBuilder<R(A1, A2, A3), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, false);
- }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
- bool cross>
-class TypeBuilder<R(A1, A2, A3, A4), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- TypeBuilder<A4, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, false);
- }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
- typename A5, bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, A5), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- TypeBuilder<A4, cross>::get(Context),
- TypeBuilder<A5, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, false);
- }
-};
-
-template<typename R, bool cross> class TypeBuilder<R(...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- return FunctionType::get(TypeBuilder<R, cross>::get(Context), true);
- }
-};
-template<typename R, typename A1, bool cross>
-class TypeBuilder<R(A1, ...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context), params, true);
- }
-};
-template<typename R, typename A1, typename A2, bool cross>
-class TypeBuilder<R(A1, A2, ...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, true);
- }
-};
-template<typename R, typename A1, typename A2, typename A3, bool cross>
-class TypeBuilder<R(A1, A2, A3, ...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, true);
- }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
- bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, ...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- TypeBuilder<A4, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, true);
- }
-};
-
-template<typename R, typename A1, typename A2, typename A3, typename A4,
- typename A5, bool cross>
-class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> {
-public:
- static FunctionType *get(LLVMContext &Context) {
- Type *params[] = {
- TypeBuilder<A1, cross>::get(Context),
- TypeBuilder<A2, cross>::get(Context),
- TypeBuilder<A3, cross>::get(Context),
- TypeBuilder<A4, cross>::get(Context),
- TypeBuilder<A5, cross>::get(Context),
- };
- return FunctionType::get(TypeBuilder<R, cross>::get(Context),
- params, true);
- }
-};
-
-} // namespace llvm
-
-#endif
diff --git a/contrib/llvm/include/llvm/IR/Value.h b/contrib/llvm/include/llvm/IR/Value.h
index f396db995ab0..4f3a45c684fc 100644
--- a/contrib/llvm/include/llvm/IR/Value.h
+++ b/contrib/llvm/include/llvm/IR/Value.h
@@ -254,7 +254,8 @@ public:
private:
void destroyValueName();
- void doRAUW(Value *New, bool NoMetadata);
+ enum class ReplaceMetadataUses { No, Yes };
+ void doRAUW(Value *New, ReplaceMetadataUses);
void setNameImpl(const Twine &Name);
public:
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
index d67b1d48f274..037c0dbb56ec 100644
--- a/contrib/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -85,6 +85,7 @@ void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
void initializeBranchRelaxationPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
void initializeBreakFalseDepsPass(PassRegistry&);
+void initializeCanonicalizeAliasesLegacyPassPass(PassRegistry &);
void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
void initializeCFGPrinterLegacyPassPass(PassRegistry&);
@@ -103,6 +104,7 @@ void initializeCodeGenPreparePass(PassRegistry&);
void initializeConstantHoistingLegacyPassPass(PassRegistry&);
void initializeConstantMergeLegacyPassPass(PassRegistry&);
void initializeConstantPropagationPass(PassRegistry&);
+void initializeControlHeightReductionLegacyPassPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCrossDSOCFIPass(PassRegistry&);
@@ -119,7 +121,6 @@ void initializeDependenceAnalysisPass(PassRegistry&);
void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
void initializeDetectDeadLanesPass(PassRegistry&);
void initializeDivRemPairsLegacyPassPass(PassRegistry&);
-void initializeDivergenceAnalysisPass(PassRegistry&);
void initializeDomOnlyPrinterPass(PassRegistry&);
void initializeDomOnlyViewerPass(PassRegistry&);
void initializeDomPrinterPass(PassRegistry&);
@@ -140,6 +141,7 @@ void initializeExpandISelPseudosPass(PassRegistry&);
void initializeExpandMemCmpPassPass(PassRegistry&);
void initializeExpandPostRAPass(PassRegistry&);
void initializeExpandReductionsPass(PassRegistry&);
+void initializeMakeGuardsExplicitLegacyPassPass(PassRegistry&);
void initializeExternalAAWrapperPassPass(PassRegistry&);
void initializeFEntryInserterPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
@@ -161,6 +163,7 @@ void initializeGlobalOptLegacyPassPass(PassRegistry&);
void initializeGlobalSplitPass(PassRegistry&);
void initializeGlobalsAAWrapperPassPass(PassRegistry&);
void initializeGuardWideningLegacyPassPass(PassRegistry&);
+void initializeHotColdSplittingLegacyPassPass(PassRegistry&);
void initializeHWAddressSanitizerPass(PassRegistry&);
void initializeIPCPPass(PassRegistry&);
void initializeIPSCCPLegacyPassPass(PassRegistry&);
@@ -181,6 +184,7 @@ void initializeInstrProfilingLegacyPassPass(PassRegistry&);
void initializeInstructionCombiningPassPass(PassRegistry&);
void initializeInstructionSelectPass(PassRegistry&);
void initializeInterleavedAccessPass(PassRegistry&);
+void initializeInterleavedLoadCombinePass(PassRegistry &);
void initializeInternalizeLegacyPassPass(PassRegistry&);
void initializeIntervalPartitionPass(PassRegistry&);
void initializeJumpThreadingPass(PassRegistry&);
@@ -191,9 +195,11 @@ void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
void initializeLazyMachineBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyValueInfoPrinterPass(PassRegistry&);
void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
+void initializeLegacyDivergenceAnalysisPass(PassRegistry&);
void initializeLegacyLICMPassPass(PassRegistry&);
void initializeLegacyLoopSinkPassPass(PassRegistry&);
void initializeLegalizerPass(PassRegistry&);
+void initializeGISelCSEAnalysisWrapperPassPass(PassRegistry &);
void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
void initializeLintPass(PassRegistry&);
void initializeLiveDebugValuesPass(PassRegistry&);
@@ -203,7 +209,7 @@ void initializeLiveRangeShrinkPass(PassRegistry&);
void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
-void initializeLoadStoreVectorizerPass(PassRegistry&);
+void initializeLoadStoreVectorizerLegacyPassPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLocalizerPass(PassRegistry&);
@@ -269,7 +275,7 @@ void initializeMemDerefPrinterPass(PassRegistry&);
void initializeMemoryDependenceWrapperPassPass(PassRegistry&);
void initializeMemorySSAPrinterLegacyPassPass(PassRegistry&);
void initializeMemorySSAWrapperPassPass(PassRegistry&);
-void initializeMemorySanitizerPass(PassRegistry&);
+void initializeMemorySanitizerLegacyPassPass(PassRegistry&);
void initializeMergeFunctionsPass(PassRegistry&);
void initializeMergeICmpsPass(PassRegistry&);
void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry&);
@@ -352,7 +358,7 @@ void initializeSampleProfileLoaderLegacyPassPass(PassRegistry&);
void initializeSanitizerCoverageModulePass(PassRegistry&);
void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
void initializeScalarizeMaskedMemIntrinPass(PassRegistry&);
-void initializeScalarizerPass(PassRegistry&);
+void initializeScalarizerLegacyPassPass(PassRegistry&);
void initializeScavengerTestPass(PassRegistry&);
void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
void initializeSeparateConstOffsetFromGEPPass(PassRegistry&);
@@ -369,6 +375,8 @@ void initializeSpillPlacementPass(PassRegistry&);
void initializeStackColoringPass(PassRegistry&);
void initializeStackMapLivenessPass(PassRegistry&);
void initializeStackProtectorPass(PassRegistry&);
+void initializeStackSafetyGlobalInfoWrapperPassPass(PassRegistry &);
+void initializeStackSafetyInfoWrapperPassPass(PassRegistry &);
void initializeStackSlotColoringPass(PassRegistry&);
void initializeStraightLineStrengthReducePass(PassRegistry&);
void initializeStripDeadDebugInfoPass(PassRegistry&);
@@ -384,7 +392,7 @@ void initializeTailDuplicatePass(PassRegistry&);
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry&);
void initializeTargetPassConfigPass(PassRegistry&);
void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
-void initializeThreadSanitizerPass(PassRegistry&);
+void initializeThreadSanitizerLegacyPassPass(PassRegistry&);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
void initializeUnifyFunctionExitNodesPass(PassRegistry&);
@@ -394,6 +402,7 @@ void initializeUnreachableMachineBlockElimPass(PassRegistry&);
void initializeVerifierLegacyPassPass(PassRegistry&);
void initializeVirtRegMapPass(PassRegistry&);
void initializeVirtRegRewriterPass(PassRegistry&);
+void initializeWarnMissedTransformationsLegacyPass(PassRegistry &);
void initializeWasmEHPreparePass(PassRegistry&);
void initializeWholeProgramDevirtPass(PassRegistry&);
void initializeWinEHPreparePass(PassRegistry&);
diff --git a/contrib/llvm/include/llvm/LTO/Config.h b/contrib/llvm/include/llvm/LTO/Config.h
index 57bba5e34840..7058602c3ee2 100644
--- a/contrib/llvm/include/llvm/LTO/Config.h
+++ b/contrib/llvm/include/llvm/LTO/Config.h
@@ -49,6 +49,10 @@ struct Config {
/// Use the new pass manager
bool UseNewPM = false;
+ /// Flag to indicate that the optimizer should not assume builtins are present
+ /// on the target.
+ bool Freestanding = false;
+
/// Disable entirely the optimizer, including importing for ThinLTO
bool CodeGenOnly = false;
@@ -73,6 +77,9 @@ struct Config {
/// Sample PGO profile path.
std::string SampleProfile;
+ /// Name remapping file for profile data.
+ std::string ProfileRemapping;
+
/// The directory to store .dwo files.
std::string DwoDir;
diff --git a/contrib/llvm/include/llvm/LTO/LTO.h b/contrib/llvm/include/llvm/LTO/LTO.h
index 7d6beab6b441..534d9b6f3f2a 100644
--- a/contrib/llvm/include/llvm/LTO/LTO.h
+++ b/contrib/llvm/include/llvm/LTO/LTO.h
@@ -40,13 +40,13 @@ class Module;
class Target;
class raw_pwrite_stream;
-/// Resolve Weak and LinkOnce values in the \p Index. Linkage changes recorded
-/// in the index and the ThinLTO backends must apply the changes to the Module
-/// via thinLTOResolveWeakForLinkerModule.
+/// Resolve linkage for prevailing symbols in the \p Index. Linkage changes
+/// recorded in the index and the ThinLTO backends must apply the changes to
+/// the module via thinLTOResolvePrevailingInModule.
///
/// This is done for correctness (if value exported, ensure we always
/// emit a copy), and compile-time optimization (allow drop of duplicates).
-void thinLTOResolveWeakForLinkerInIndex(
+void thinLTOResolvePrevailingInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
isPrevailing,
@@ -60,6 +60,19 @@ void thinLTOInternalizeAndPromoteInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(StringRef, GlobalValue::GUID)> isExported);
+/// Computes a unique hash for the Module considering the current list of
+/// export/import and other global analysis results.
+/// The hash is produced in \p Key.
+void computeLTOCacheKey(
+ SmallString<40> &Key, const lto::Config &Conf,
+ const ModuleSummaryIndex &Index, StringRef ModuleID,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ const GVSummaryMapTy &DefinedGlobals,
+ const std::set<GlobalValue::GUID> &CfiFunctionDefs = {},
+ const std::set<GlobalValue::GUID> &CfiFunctionDecls = {});
+
namespace lto {
/// Given the original \p Path to an output file, replace any path
@@ -387,6 +400,9 @@ private:
Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache);
mutable bool CalledGetMaxTasks = false;
+
+ // Use Optional to distinguish false from not yet initialized.
+ Optional<bool> EnableSplitLTOUnit;
};
/// The resolution for a symbol. The linker must provide a SymbolResolution for
diff --git a/contrib/llvm/include/llvm/LTO/SummaryBasedOptimizations.h b/contrib/llvm/include/llvm/LTO/SummaryBasedOptimizations.h
new file mode 100644
index 000000000000..ad3a8e7dc77b
--- /dev/null
+++ b/contrib/llvm/include/llvm/LTO/SummaryBasedOptimizations.h
@@ -0,0 +1,17 @@
+//=- llvm/LTO/SummaryBasedOptimizations.h -Link time optimizations-*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_SUMMARYBASEDOPTIMIZATIONS_H
+#define LLVM_LTO_SUMMARYBASEDOPTIMIZATIONS_H
+namespace llvm {
+class ModuleSummaryIndex;
+void computeSyntheticCounts(ModuleSummaryIndex &Index);
+
+} // namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h b/contrib/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
index f48ab02863a5..8f23b7cb4574 100644
--- a/contrib/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
+++ b/contrib/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
@@ -48,6 +48,9 @@
#include <string>
#include <vector>
+/// Enable global value internalization in LTO.
+extern llvm::cl::opt<bool> EnableLTOInternalization;
+
namespace llvm {
template <typename T> class ArrayRef;
class LLVMContext;
@@ -233,7 +236,7 @@ private:
unsigned OptLevel = 2;
lto_diagnostic_handler_t DiagHandler = nullptr;
void *DiagContext = nullptr;
- bool ShouldInternalize = true;
+ bool ShouldInternalize = EnableLTOInternalization;
bool ShouldEmbedUselists = false;
bool ShouldRestoreGlobalsLinkage = false;
TargetMachine::CodeGenFileType FileType = TargetMachine::CGFT_ObjectFile;
diff --git a/contrib/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/contrib/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
index b32a972542c8..d4c69a1ce260 100644
--- a/contrib/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
+++ b/contrib/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
@@ -187,7 +187,7 @@ public:
/// Cache policy: the maximum size for the cache directory in bytes. A value
/// over the amount of available space on the disk will be reduced to the
/// amount of available space. A value of 0 will be ignored.
- void setCacheMaxSizeBytes(unsigned MaxSizeBytes) {
+ void setCacheMaxSizeBytes(uint64_t MaxSizeBytes) {
if (MaxSizeBytes)
CacheOptions.Policy.MaxSizeBytes = MaxSizeBytes;
}
@@ -273,8 +273,8 @@ public:
/**
* Compute and emit the imported files for module at \p ModulePath.
*/
- static void emitImports(StringRef ModulePath, StringRef OutputName,
- ModuleSummaryIndex &Index);
+ void emitImports(Module &Module, StringRef OutputName,
+ ModuleSummaryIndex &Index);
/**
* Perform cross-module importing for the module identified by
@@ -285,8 +285,8 @@ public:
/**
* Compute the list of summaries needed for importing into module.
*/
- static void gatherImportedSummariesForModule(
- StringRef ModulePath, ModuleSummaryIndex &Index,
+ void gatherImportedSummariesForModule(
+ Module &Module, ModuleSummaryIndex &Index,
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
/**
@@ -299,11 +299,6 @@ public:
*/
void optimize(Module &Module);
- /**
- * Perform ThinLTO CodeGen.
- */
- std::unique_ptr<MemoryBuffer> codegen(Module &Module);
-
/**@}*/
private:
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index bd432c58b613..0851c2f8d265 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -50,6 +50,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/InstSimplifyPass.h"
+#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
@@ -88,13 +89,13 @@ namespace {
(void) llvm::createCalledValuePropagationPass();
(void) llvm::createConstantMergePass();
(void) llvm::createConstantPropagationPass();
+ (void) llvm::createControlHeightReductionLegacyPass();
(void) llvm::createCostModelAnalysisPass();
(void) llvm::createDeadArgEliminationPass();
(void) llvm::createDeadCodeEliminationPass();
(void) llvm::createDeadInstEliminationPass();
(void) llvm::createDeadStoreEliminationPass();
(void) llvm::createDependenceAnalysisWrapperPass();
- (void) llvm::createDivergenceAnalysisPass();
(void) llvm::createDomOnlyPrinterPass();
(void) llvm::createDomPrinterPass();
(void) llvm::createDomOnlyViewerPass();
@@ -121,6 +122,7 @@ namespace {
(void) llvm::createInstructionCombiningPass();
(void) llvm::createInternalizePass();
(void) llvm::createLCSSAPass();
+ (void) llvm::createLegacyDivergenceAnalysisPass();
(void) llvm::createLICMPass();
(void) llvm::createLoopSinkPass();
(void) llvm::createLazyValueInfoPass();
@@ -218,6 +220,7 @@ namespace {
(void) llvm::createFloat2IntPass();
(void) llvm::createEliminateAvailableExternallyPass();
(void) llvm::createScalarizeMaskedMemIntrinPass();
+ (void) llvm::createWarnMissedTransformationsPass();
(void)new llvm::IntervalPartition();
(void)new llvm::ScalarEvolutionWrapperPass();
@@ -227,7 +230,8 @@ namespace {
llvm::TargetLibraryInfo TLI(TLII);
llvm::AliasAnalysis AA(TLI);
llvm::AliasSetTracker X(AA);
- X.add(nullptr, 0, llvm::AAMDNodes()); // for -print-alias-sets
+ X.add(nullptr, llvm::LocationSize::unknown(),
+ llvm::AAMDNodes()); // for -print-alias-sets
(void) llvm::AreStatisticsEnabled();
(void) llvm::sys::RunningOnValgrind();
}
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoWasm.h b/contrib/llvm/include/llvm/MC/MCAsmInfoWasm.h
index bc46cfdf4c4c..71c6ee28df70 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfoWasm.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoWasm.h
@@ -19,6 +19,6 @@ class MCAsmInfoWasm : public MCAsmInfo {
protected:
MCAsmInfoWasm();
};
-}
+} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCAsmMacro.h b/contrib/llvm/include/llvm/MC/MCAsmMacro.h
index 09b32c7ea333..135fa4f2e33d 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmMacro.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmMacro.h
@@ -52,7 +52,7 @@ public:
Pipe, PipePipe, Caret,
Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
Less, LessEqual, LessLess, LessGreater,
- Greater, GreaterEqual, GreaterGreater, At,
+ Greater, GreaterEqual, GreaterGreater, At, MinusGreater,
// MIPS unary expression operators such as %neg.
PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index 0f9499d705e4..986c6e17548f 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -23,6 +23,7 @@
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -94,6 +95,8 @@ public:
unsigned Major;
unsigned Minor;
unsigned Update;
+ /// An optional version of the SDK that was used to build the source.
+ VersionTuple SDKVersion;
};
private:
@@ -255,20 +258,24 @@ public:
/// MachO deployment target version information.
const VersionInfoType &getVersionInfo() const { return VersionInfo; }
void setVersionMin(MCVersionMinType Type, unsigned Major, unsigned Minor,
- unsigned Update) {
+ unsigned Update,
+ VersionTuple SDKVersion = VersionTuple()) {
VersionInfo.EmitBuildVersion = false;
VersionInfo.TypeOrPlatform.Type = Type;
VersionInfo.Major = Major;
VersionInfo.Minor = Minor;
VersionInfo.Update = Update;
+ VersionInfo.SDKVersion = SDKVersion;
}
void setBuildVersion(MachO::PlatformType Platform, unsigned Major,
- unsigned Minor, unsigned Update) {
+ unsigned Minor, unsigned Update,
+ VersionTuple SDKVersion = VersionTuple()) {
VersionInfo.EmitBuildVersion = true;
VersionInfo.TypeOrPlatform.Platform = Platform;
VersionInfo.Major = Major;
VersionInfo.Minor = Minor;
VersionInfo.Update = Update;
+ VersionInfo.SDKVersion = SDKVersion;
}
/// Reuse an assembler instance
diff --git a/contrib/llvm/include/llvm/MC/MCCodeView.h b/contrib/llvm/include/llvm/MC/MCCodeView.h
index 1d9e3c6698cf..cef03a409f95 100644
--- a/contrib/llvm/include/llvm/MC/MCCodeView.h
+++ b/contrib/llvm/include/llvm/MC/MCCodeView.h
@@ -30,6 +30,7 @@ class CodeViewContext;
/// Instances of this class represent the information from a
/// .cv_loc directive.
class MCCVLoc {
+ const MCSymbol *Label = nullptr;
uint32_t FunctionId;
uint32_t FileNum;
uint32_t Line;
@@ -39,15 +40,17 @@ class MCCVLoc {
private: // CodeViewContext manages these
friend class CodeViewContext;
- MCCVLoc(unsigned functionid, unsigned fileNum, unsigned line, unsigned column,
- bool prologueend, bool isstmt)
- : FunctionId(functionid), FileNum(fileNum), Line(line), Column(column),
- PrologueEnd(prologueend), IsStmt(isstmt) {}
+ MCCVLoc(const MCSymbol *Label, unsigned functionid, unsigned fileNum,
+ unsigned line, unsigned column, bool prologueend, bool isstmt)
+ : Label(Label), FunctionId(functionid), FileNum(fileNum), Line(line),
+ Column(column), PrologueEnd(prologueend), IsStmt(isstmt) {}
// Allow the default copy constructor and assignment operator to be used
// for an MCCVLoc object.
public:
+ const MCSymbol *getLabel() const { return Label; }
+
unsigned getFunctionId() const { return FunctionId; }
/// Get the FileNum of this MCCVLoc.
@@ -62,6 +65,8 @@ public:
bool isPrologueEnd() const { return PrologueEnd; }
bool isStmt() const { return IsStmt; }
+ void setLabel(const MCSymbol *L) { Label = L; }
+
void setFunctionId(unsigned FID) { FunctionId = FID; }
/// Set the FileNum of this MCCVLoc.
@@ -80,31 +85,6 @@ public:
void setIsStmt(bool IS) { IsStmt = IS; }
};
-/// Instances of this class represent the line information for
-/// the CodeView line table entries. Which is created after a machine
-/// instruction is assembled and uses an address from a temporary label
-/// created at the current address in the current section and the info from
-/// the last .cv_loc directive seen as stored in the context.
-class MCCVLineEntry : public MCCVLoc {
- const MCSymbol *Label;
-
-private:
- // Allow the default copy constructor and assignment operator to be used
- // for an MCCVLineEntry object.
-
-public:
- // Constructor to create an MCCVLineEntry given a symbol and the dwarf loc.
- MCCVLineEntry(const MCSymbol *Label, const MCCVLoc loc)
- : MCCVLoc(loc), Label(Label) {}
-
- const MCSymbol *getLabel() const { return Label; }
-
- // This is called when an instruction is assembled into the specified
- // section and if there is information from the last .cv_loc directive that
- // has yet to have a line entry made for it is made.
- static void Make(MCObjectStreamer *MCOS);
-};
-
/// Information describing a function or inlined call site introduced by
/// .cv_func_id or .cv_inline_site_id. Accumulates information from .cv_loc
/// directives used with this function's id or the id of an inlined call site
@@ -183,32 +163,20 @@ public:
/// and sets CVLocSeen. When the next instruction is assembled an entry
/// in the line number table with this information and the address of the
/// instruction will be created.
- void setCurrentCVLoc(unsigned FunctionId, unsigned FileNo, unsigned Line,
- unsigned Column, bool PrologueEnd, bool IsStmt) {
- CurrentCVLoc.setFunctionId(FunctionId);
- CurrentCVLoc.setFileNum(FileNo);
- CurrentCVLoc.setLine(Line);
- CurrentCVLoc.setColumn(Column);
- CurrentCVLoc.setPrologueEnd(PrologueEnd);
- CurrentCVLoc.setIsStmt(IsStmt);
- CVLocSeen = true;
- }
-
- bool getCVLocSeen() { return CVLocSeen; }
- void clearCVLocSeen() { CVLocSeen = false; }
-
- const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
+ void recordCVLoc(MCContext &Ctx, const MCSymbol *Label, unsigned FunctionId,
+ unsigned FileNo, unsigned Line, unsigned Column,
+ bool PrologueEnd, bool IsStmt);
bool isValidCVFileNumber(unsigned FileNumber);
/// Add a line entry.
- void addLineEntry(const MCCVLineEntry &LineEntry);
+ void addLineEntry(const MCCVLoc &LineEntry);
- std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId);
+ std::vector<MCCVLoc> getFunctionLineEntries(unsigned FuncId);
std::pair<size_t, size_t> getLineExtent(unsigned FuncId);
- ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R);
+ ArrayRef<MCCVLoc> getLinesForExtent(size_t L, size_t R);
/// Emits a line table substream.
void emitLineTableForFunction(MCObjectStreamer &OS, unsigned FuncId,
@@ -226,7 +194,7 @@ public:
void encodeInlineLineTable(MCAsmLayout &Layout,
MCCVInlineLineTableFragment &F);
- void
+ MCFragment *
emitDefRange(MCObjectStreamer &OS,
ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
StringRef FixedSizePortion);
@@ -247,10 +215,6 @@ public:
std::pair<StringRef, unsigned> addToStringTable(StringRef S);
private:
- /// The current CodeView line information from the last .cv_loc directive.
- MCCVLoc CurrentCVLoc = MCCVLoc(0, 0, 0, 0, false, true);
- bool CVLocSeen = false;
-
/// Map from string to string table offset.
StringMap<unsigned> StringTable;
@@ -286,8 +250,8 @@ private:
/// id.
std::map<unsigned, std::pair<size_t, size_t>> MCCVLineStartStop;
- /// A collection of MCCVLineEntry for each section.
- std::vector<MCCVLineEntry> MCCVLines;
+ /// A collection of MCCVLoc for each section.
+ std::vector<MCCVLoc> MCCVLines;
/// All known functions and inlined call sites, indexed by function id.
std::vector<MCCVFunctionInfo> Functions;
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index a712e2d95cbc..3b8ac8b79e21 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -298,10 +298,6 @@ namespace llvm {
CodeViewContext &getCVContext();
- /// Clear the current cv_loc, if there is one. Avoids lazily creating a
- /// CodeViewContext if none is needed.
- void clearCVLocSeen();
-
void setAllowTemporaryLabels(bool Value) { AllowTemporaryLabels = Value; }
void setUseNamesOnTempLabels(bool Value) { UseNamesOnTempLabels = Value; }
diff --git a/contrib/llvm/include/llvm/MC/MCDwarf.h b/contrib/llvm/include/llvm/MC/MCDwarf.h
index 2bfaf19cf2c6..7b96e9aaca89 100644
--- a/contrib/llvm/include/llvm/MC/MCDwarf.h
+++ b/contrib/llvm/include/llvm/MC/MCDwarf.h
@@ -430,6 +430,7 @@ public:
OpUndefined,
OpRegister,
OpWindowSave,
+ OpNegateRAState,
OpGnuArgsSize
};
@@ -509,6 +510,11 @@ public:
return MCCFIInstruction(OpWindowSave, L, 0, 0, "");
}
+ /// .cfi_negate_ra_state AArch64 negate RA state.
+ static MCCFIInstruction createNegateRAState(MCSymbol *L) {
+ return MCCFIInstruction(OpNegateRAState, L, 0, 0, "");
+ }
+
/// .cfi_restore says that the rule for Register is now the same as it
/// was at the beginning of the function, after all initial instructions added
/// by .cfi_startproc were executed.
@@ -593,6 +599,7 @@ struct MCDwarfFrameInfo {
bool IsSignalFrame = false;
bool IsSimple = false;
unsigned RAReg = static_cast<unsigned>(INT_MAX);
+ bool IsBKeyFrame = false;
};
class MCDwarfFrameEmitter {
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
index bff58fef6af9..f226d6a45a5a 100644
--- a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
@@ -73,6 +74,8 @@ public:
switch (OSType) {
case Triple::CloudABI:
return ELF::ELFOSABI_CLOUDABI;
+ case Triple::HermitCore:
+ return ELF::ELFOSABI_STANDALONE;
case Triple::PS4:
case Triple::FreeBSD:
return ELF::ELFOSABI_FREEBSD;
@@ -90,6 +93,8 @@ public:
virtual void sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs);
+ virtual void addTargetSectionFlags(MCContext &Ctx, MCSectionELF &Sec);
+
/// \name Accessors
/// @{
uint8_t getOSABI() const { return OSABI; }
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
index 3fd58a169d4b..8cb6b86fd672 100644
--- a/contrib/llvm/include/llvm/MC/MCExpr.h
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -286,7 +286,9 @@ public:
VK_Hexagon_IE_GOT,
VK_WebAssembly_FUNCTION, // Function table index, rather than virtual addr
+ VK_WebAssembly_GLOBAL, // Global object index
VK_WebAssembly_TYPEINDEX,// Type table index
+ VK_WebAssembly_EVENT, // Event index
VK_AMDGPU_GOTPCREL32_LO, // symbol@gotpcrel32@lo
VK_AMDGPU_GOTPCREL32_HI, // symbol@gotpcrel32@hi
diff --git a/contrib/llvm/include/llvm/MC/MCInst.h b/contrib/llvm/include/llvm/MC/MCInst.h
index 67bb11a70387..d501b686bb2e 100644
--- a/contrib/llvm/include/llvm/MC/MCInst.h
+++ b/contrib/llvm/include/llvm/MC/MCInst.h
@@ -208,6 +208,8 @@ public:
/// string.
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer = nullptr,
StringRef Separator = " ") const;
+ void dump_pretty(raw_ostream &OS, StringRef Name,
+ StringRef Separator = " ") const;
};
inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
diff --git a/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
index e1673208d875..200f10f7d64b 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
@@ -23,6 +23,7 @@
namespace llvm {
class MCRegisterInfo;
+class Triple;
class MCInstrAnalysis {
protected:
@@ -87,24 +88,77 @@ public:
const MCInst &Inst,
APInt &Writes) const;
- /// Returns true if \param Inst is a dependency breaking instruction for the
- /// given subtarget.
+ /// Returns true if MI is a dependency breaking zero-idiom for the given
+ /// subtarget.
+ ///
+ /// Mask is used to identify input operands that have their dependency
+ /// broken. Each bit of the mask is associated with a specific input operand.
+ /// Bits associated with explicit input operands are laid out first in the
+ /// mask; implicit operands come after explicit operands.
+ ///
+ /// Dependencies are broken only for operands that have their corresponding bit
+ /// set. Operands that have their bit cleared, or that don't have a
+ /// corresponding bit in the mask don't have their dependency broken. Note
+ /// that Mask may not be big enough to describe all operands. The assumption
+ /// for operands that don't have a correspondent bit in the mask is that those
+ /// are still data dependent.
+ ///
+ /// The only exception to the rule is for when Mask has all zeroes.
+ /// A zero mask means: dependencies are broken for all explicit register
+ /// operands.
+ virtual bool isZeroIdiom(const MCInst &MI, APInt &Mask,
+ unsigned CPUID) const {
+ return false;
+ }
+
+ /// Returns true if MI is a dependency breaking instruction for the
+ /// subtarget associated with CPUID.
///
/// The value computed by a dependency breaking instruction is not dependent
/// on the inputs. An example of dependency breaking instruction on X86 is
/// `XOR %eax, %eax`.
- /// TODO: In future, we could implement an alternative approach where this
- /// method returns `true` if the input instruction is not dependent on
- /// some/all of its input operands. An APInt mask could then be used to
- /// identify independent operands.
- virtual bool isDependencyBreaking(const MCSubtargetInfo &STI,
- const MCInst &Inst) const;
+ ///
+ /// If MI is a dependency breaking instruction for subtarget CPUID, then Mask
+ /// can be inspected to identify independent operands.
+ ///
+ /// Essentially, each bit of the mask corresponds to an input operand.
+ /// Explicit operands are laid out first in the mask; implicit operands follow
+ /// explicit operands. Bits are set for operands that are independent.
+ ///
+ /// Note that the number of bits in Mask may not be equivalent to the sum of
+ /// explicit and implicit operands in MI. Operands that don't have a
+ /// corresponding bit in Mask are assumed "not independent".
+ ///
+ /// The only exception is for when Mask is all zeroes. That means: explicit
+ /// input operands of MI are independent.
+ virtual bool isDependencyBreaking(const MCInst &MI, APInt &Mask,
+ unsigned CPUID) const {
+ return isZeroIdiom(MI, Mask, CPUID);
+ }
+
+ /// Returns true if MI is a candidate for move elimination.
+ ///
+ /// Different subtargets may apply different constraints to optimizable
+ /// register moves. For example, on most X86 subtargets, a candidate for move
+ /// elimination cannot specify the same register for both source and
+ /// destination.
+ virtual bool isOptimizableRegisterMove(const MCInst &MI,
+ unsigned CPUID) const {
+ return false;
+ }
/// Given a branch instruction try to get the address the branch
/// targets. Return true on success, and the address in Target.
virtual bool
evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
uint64_t &Target) const;
+
+ /// Returns (PLT virtual address, GOT virtual address) pairs for PLT entries.
+ virtual std::vector<std::pair<uint64_t, uint64_t>>
+ findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+ uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
+ return {};
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCInstrDesc.h b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
index 3e000a2210e9..61e7d09afbcb 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
@@ -120,6 +120,7 @@ enum Flag {
HasOptionalDef,
Pseudo,
Return,
+ EHScopeReturn,
Call,
Barrier,
Terminator,
@@ -150,7 +151,8 @@ enum Flag {
InsertSubreg,
Convergent,
Add,
- Trap
+ Trap,
+ VariadicOpsAreDefs,
};
}
@@ -382,6 +384,11 @@ public:
/// additional values.
bool isConvergent() const { return Flags & (1ULL << MCID::Convergent); }
+ /// Return true if variadic operands of this instruction are definitions.
+ bool variadicOpsAreDefs() const {
+ return Flags & (1ULL << MCID::VariadicOpsAreDefs);
+ }
+
//===--------------------------------------------------------------------===//
// Side Effect Analysis
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
index 3a27ef8c8fee..f8142ccd8ac5 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/VersionTuple.h"
namespace llvm {
class MCContext;
@@ -42,12 +43,11 @@ protected:
/// dwarf unwind.
bool OmitDwarfIfHaveCompactUnwind;
- /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
- /// for EH.
- unsigned PersonalityEncoding;
- unsigned LSDAEncoding;
- unsigned FDECFIEncoding;
- unsigned TTypeEncoding;
+ /// FDE CFI encoding. Controls the encoding of the begin label in the
+ /// .eh_frame section. Unlike the LSDA encoding, personality encoding, and
+ /// type encodings, this is something that the assembler just "knows" about
+ /// its target.
+ unsigned FDECFIEncoding = 0;
/// Compact unwind encoding indicating that we should emit only an EH frame.
unsigned CompactUnwindDwarfEHFrameOnly;
@@ -118,6 +118,8 @@ protected:
MCSection *DwarfAddrSection;
/// The DWARF v5 range list section.
MCSection *DwarfRnglistsSection;
+ /// The DWARF v5 locations list section.
+ MCSection *DwarfLoclistsSection;
/// The DWARF v5 range list section for fission.
MCSection *DwarfRnglistsDWOSection;
@@ -226,10 +228,7 @@ public:
return CommDirectiveSupportsAlignment;
}
- unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
- unsigned getLSDAEncoding() const { return LSDAEncoding; }
unsigned getFDEEncoding() const { return FDECFIEncoding; }
- unsigned getTTypeEncoding() const { return TTypeEncoding; }
unsigned getCompactUnwindDwarfEHFrameOnly() const {
return CompactUnwindDwarfEHFrameOnly;
@@ -243,6 +242,9 @@ public:
MCSection *getCompactUnwindSection() const { return CompactUnwindSection; }
MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
+ MCSection *getDwarfInfoSection(uint64_t Hash) const {
+ return getDwarfComdatSection(".debug_info", Hash);
+ }
MCSection *getDwarfLineSection() const { return DwarfLineSection; }
MCSection *getDwarfLineStrSection() const { return DwarfLineStrSection; }
MCSection *getDwarfFrameSection() const { return DwarfFrameSection; }
@@ -262,6 +264,7 @@ public:
MCSection *getDwarfARangesSection() const { return DwarfARangesSection; }
MCSection *getDwarfRangesSection() const { return DwarfRangesSection; }
MCSection *getDwarfRnglistsSection() const { return DwarfRnglistsSection; }
+ MCSection *getDwarfLoclistsSection() const { return DwarfLoclistsSection; }
MCSection *getDwarfMacinfoSection() const { return DwarfMacinfoSection; }
MCSection *getDwarfDebugNamesSection() const {
@@ -278,7 +281,9 @@ public:
return DwarfAccelTypesSection;
}
MCSection *getDwarfInfoDWOSection() const { return DwarfInfoDWOSection; }
- MCSection *getDwarfTypesSection(uint64_t Hash) const;
+ MCSection *getDwarfTypesSection(uint64_t Hash) const {
+ return getDwarfComdatSection(".debug_types", Hash);
+ }
MCSection *getDwarfTypesDWOSection() const { return DwarfTypesDWOSection; }
MCSection *getDwarfAbbrevDWOSection() const { return DwarfAbbrevDWOSection; }
MCSection *getDwarfStrDWOSection() const { return DwarfStrDWOSection; }
@@ -386,14 +391,22 @@ private:
bool PositionIndependent;
MCContext *Ctx;
Triple TT;
+ VersionTuple SDKVersion;
void initMachOMCObjectFileInfo(const Triple &T);
void initELFMCObjectFileInfo(const Triple &T, bool Large);
void initCOFFMCObjectFileInfo(const Triple &T);
void initWasmMCObjectFileInfo(const Triple &T);
+ MCSection *getDwarfComdatSection(const char *Name, uint64_t Hash) const;
public:
const Triple &getTargetTriple() const { return TT; }
+
+ void setSDKVersion(const VersionTuple &TheSDKVersion) {
+ SDKVersion = TheSDKVersion;
+ }
+
+ const VersionTuple &getSDKVersion() const { return SDKVersion; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
index 035206dce939..892909656c15 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -39,12 +39,21 @@ class MCObjectStreamer : public MCStreamer {
bool EmitEHFrame;
bool EmitDebugFrame;
SmallVector<MCSymbol *, 2> PendingLabels;
+ struct PendingMCFixup {
+ const MCSymbol *Sym;
+ MCFixup Fixup;
+ MCDataFragment *DF;
+ PendingMCFixup(const MCSymbol *McSym, MCDataFragment *F, MCFixup McFixup)
+ : Sym(McSym), Fixup(McFixup), DF(F) {}
+ };
+ SmallVector<PendingMCFixup, 2> PendingFixups;
virtual void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
MCSymbol *EmitCFILabel() override;
void EmitInstructionImpl(const MCInst &Inst, const MCSubtargetInfo &STI);
+ void resolvePendingFixups();
protected:
MCObjectStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
@@ -179,7 +188,9 @@ public:
///
/// Emit the absolute difference between \c Hi and \c Lo, as long as we can
/// compute it. Currently, that requires that both symbols are in the same
- /// data fragment. Otherwise, do nothing and return \c false.
+ /// data fragment and that the target has not specified that diff expressions
+ /// require relocations to be emitted. Otherwise, do nothing and return
+ /// \c false.
///
/// \pre Offset of \c Hi is greater than the offset \c Lo.
void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
index 207183a69b0e..2e9b8dfa3b26 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -30,7 +30,6 @@ class AsmLexer : public MCAsmLexer {
StringRef CurBuf;
bool IsAtStartOfLine = true;
bool IsAtStartOfStatement = true;
- bool IsParsingMSInlineAsm = false;
bool IsPeeking = false;
protected:
@@ -44,7 +43,6 @@ public:
~AsmLexer() override;
void setBuffer(StringRef Buf, const char *ptr = nullptr);
- void setParsingMSInlineAsm(bool V) { IsParsingMSInlineAsm = V; }
StringRef LexUntilEndOfStatement() override;
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index 10550b3370e8..ea13d1cdc09f 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -50,9 +50,9 @@ protected: // Can only create subclasses.
bool SkipSpace = true;
bool AllowAtInIdentifier;
bool IsAtStartOfStatement = true;
+ bool LexMasmIntegers = false;
AsmCommentConsumer *CommentConsumer = nullptr;
- bool AltMacroMode;
MCAsmLexer();
virtual AsmToken LexToken() = 0;
@@ -67,17 +67,9 @@ public:
MCAsmLexer &operator=(const MCAsmLexer &) = delete;
virtual ~MCAsmLexer();
- bool IsaAltMacroMode() {
- return AltMacroMode;
- }
-
- void SetAltMacroMode(bool AltMacroSet) {
- AltMacroMode = AltMacroSet;
- }
-
/// Consume the next token from the input stream and return it.
///
- /// The lexer will continuosly return the end-of-file token once the end of
+ /// The lexer will continuously return the end-of-file token once the end of
/// the main input file has been reached.
const AsmToken &Lex() {
assert(!CurTok.empty());
@@ -155,6 +147,10 @@ public:
void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
this->CommentConsumer = CommentConsumer;
}
+
+ /// Set whether to lex masm-style binary and hex literals. They look like
+ /// 0b1101 and 0ABCh respectively.
+ void setLexMasmIntegers(bool V) { LexMasmIntegers = V; }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index 0d56f36fbae8..b80289878e6e 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -122,17 +122,18 @@ public:
private:
MCTargetAsmParser *TargetParser = nullptr;
- unsigned ShowParsedOperands : 1;
-
protected: // Can only create subclasses.
MCAsmParser();
+ SmallVector<MCPendingError, 0> PendingErrors;
+
/// Flag tracking whether any errors have been encountered.
bool HadError = false;
+
/// Enable print [latency:throughput] in output file.
bool EnablePrintSchedInfo = false;
- SmallVector<MCPendingError, 1> PendingErrors;
+ bool ShowParsedOperands = false;
public:
MCAsmParser(const MCAsmParser &) = delete;
@@ -166,7 +167,7 @@ public:
void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }
void setEnablePrintSchedInfo(bool Value) { EnablePrintSchedInfo = Value; }
- bool shouldPrintSchedInfo() { return EnablePrintSchedInfo; }
+ bool shouldPrintSchedInfo() const { return EnablePrintSchedInfo; }
/// Run the parser on the input source buffer.
virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
index 135b5fab07ce..ccf13a6a4fb4 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -476,6 +476,9 @@ public:
return nullptr;
}
+ // For actions that have to be performed before a label is emitted
+ virtual void doBeforeLabelEmit(MCSymbol *Symbol) {}
+
virtual void onLabelParsed(MCSymbol *Symbol) {}
/// Ensure that all previously parsed instructions have been emitted to the
@@ -487,6 +490,9 @@ public:
MCContext &Ctx) {
return nullptr;
}
+
+ // For any checks or cleanups at the end of parsing.
+ virtual void onEndOfFile() {}
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
index 6edfc30b0aa6..8d8c677c77ea 100644
--- a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -41,7 +41,6 @@ public:
const uint16_t RegsSize;
const uint16_t RegSetSize;
const uint16_t ID;
- const uint16_t PhysRegSize;
const int8_t CopyCost;
const bool Allocatable;
@@ -80,11 +79,6 @@ public:
return contains(Reg1) && contains(Reg2);
}
- /// Return the size of the physical register in bytes.
- unsigned getPhysRegSize() const { return PhysRegSize; }
- /// Temporary function to allow out-of-tree targets to switch.
- unsigned getSize() const { return getPhysRegSize(); }
-
/// getCopyCost - Return the cost of copying a value between two registers in
/// this class. A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
diff --git a/contrib/llvm/include/llvm/MC/MCSchedule.h b/contrib/llvm/include/llvm/MC/MCSchedule.h
index f2f1dfb36918..689ac73cbdd1 100644
--- a/contrib/llvm/include/llvm/MC/MCSchedule.h
+++ b/contrib/llvm/include/llvm/MC/MCSchedule.h
@@ -142,6 +142,7 @@ struct MCSchedClassDesc {
struct MCRegisterCostEntry {
unsigned RegisterClassID;
unsigned Cost;
+ bool AllowMoveElimination;
};
/// A register file descriptor.
@@ -159,6 +160,12 @@ struct MCRegisterFileDesc {
uint16_t NumRegisterCostEntries;
// Index of the first cost entry in MCExtraProcessorInfo::RegisterCostTable.
uint16_t RegisterCostEntryIdx;
+ // A value of zero means: there is no limit in the number of moves that can be
+ // eliminated every cycle.
+ uint16_t MaxMovesEliminatedPerCycle;
+ // True if this register file only knows how to optimize register moves from
+ // known zero registers.
+ bool AllowZeroMoveEliminationOnly;
};
/// Provide extra details about the machine processor.
@@ -176,18 +183,8 @@ struct MCExtraProcessorInfo {
unsigned NumRegisterFiles;
const MCRegisterCostEntry *RegisterCostTable;
unsigned NumRegisterCostEntries;
-
- struct PfmCountersInfo {
- // An optional name of a performance counter that can be used to measure
- // cycles.
- const char *CycleCounter;
-
- // For each MCProcResourceDesc defined by the processor, an optional list of
- // names of performance counters that can be used to measure the resource
- // utilization.
- const char **IssueCounters;
- };
- PfmCountersInfo PfmCounters;
+ unsigned LoadQueueID;
+ unsigned StoreQueueID;
};
/// Machine model for scheduling, bundling, and heuristics.
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
index ba5c60d3ba58..eb210b4e9dfa 100644
--- a/contrib/llvm/include/llvm/MC/MCSection.h
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -78,6 +78,10 @@ private:
/// Whether this section has had instructions emitted into it.
bool HasInstructions : 1;
+ /// Whether this section has had data emitted into it.
+ /// Right now this is only used by the ARM backend.
+ bool HasData : 1;
+
bool IsRegistered : 1;
MCDummyFragment DummyFragment;
@@ -137,6 +141,9 @@ public:
bool hasInstructions() const { return HasInstructions; }
void setHasInstructions(bool Value) { HasInstructions = Value; }
+ bool hasData() const { return HasData; }
+ void setHasData(bool Value) { HasData = Value; }
+
bool isRegistered() const { return IsRegistered; }
void setIsRegistered(bool Value) { IsRegistered = Value; }
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index e4d0dc03b87c..f613d3a1943f 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -28,6 +28,7 @@
#include "llvm/Support/MD5.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <cstdint>
#include <memory>
@@ -109,6 +110,11 @@ public:
virtual void emitValue(const MCExpr *Value);
+ /// Emit the bytes in \p Data into the output.
+ ///
+ /// This is used to emit bytes in \p Data as sequence of .byte directives.
+ virtual void emitRawBytes(StringRef Data);
+
virtual void finish();
};
@@ -193,10 +199,6 @@ class MCStreamer {
WinEH::FrameInfo *CurrentWinFrameInfo;
- /// Retreive the current frame info if one is available and it is not yet
- /// closed. Otherwise, issue an error and return null.
- WinEH::FrameInfo *EnsureValidWinFrameInfo(SMLoc Loc);
-
/// Tracks an index to represent the order a symbol was emitted in.
/// Zero means we did not emit that symbol.
DenseMap<const MCSymbol *, unsigned> SymbolOrdering;
@@ -219,10 +221,6 @@ protected:
virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);
- /// When emitting an object file, create and emit a real label. When emitting
- /// textual assembly, this should do nothing to avoid polluting our output.
- virtual MCSymbol *EmitCFILabel();
-
WinEH::FrameInfo *getCurrentWinFrameInfo() {
return CurrentWinFrameInfo;
}
@@ -231,6 +229,9 @@ protected:
virtual void EmitRawTextImpl(StringRef String);
+ /// Returns true if the .cv_loc directive is in the right section.
+ bool checkCVLocSection(unsigned FuncId, unsigned FileNo, SMLoc Loc);
+
public:
MCStreamer(const MCStreamer &) = delete;
MCStreamer &operator=(const MCStreamer &) = delete;
@@ -258,6 +259,14 @@ public:
return TargetStreamer.get();
}
+ /// When emitting an object file, create and emit a real label. When emitting
+ /// textual assembly, this should do nothing to avoid polluting our output.
+ virtual MCSymbol *EmitCFILabel();
+
+ /// Retrieve the current frame info if one is available and it is not yet
+ /// closed. Otherwise, issue an error and return null.
+ WinEH::FrameInfo *EnsureValidWinFrameInfo(SMLoc Loc);
+
unsigned getNumFrameInfos() { return DwarfFrameInfos.size(); }
ArrayRef<MCDwarfFrameInfo> getDwarfFrameInfos() const {
return DwarfFrameInfos;
@@ -444,14 +453,17 @@ public:
/// Specify the Mach-O minimum deployment target version.
virtual void EmitVersionMin(MCVersionMinType Type, unsigned Major,
- unsigned Minor, unsigned Update) {}
+ unsigned Minor, unsigned Update,
+ VersionTuple SDKVersion) {}
/// Emit/Specify Mach-O build version command.
/// \p Platform should be one of MachO::PlatformType.
virtual void EmitBuildVersion(unsigned Platform, unsigned Major,
- unsigned Minor, unsigned Update) {}
+ unsigned Minor, unsigned Update,
+ VersionTuple SDKVersion) {}
- void EmitVersionForTarget(const Triple &Target);
+ void EmitVersionForTarget(const Triple &Target,
+ const VersionTuple &SDKVersion);
/// Note in the output that the specified \p Func is a Thumb mode
/// function (ARM target only).
@@ -794,6 +806,8 @@ public:
Optional<StringRef> Source,
unsigned CUID = 0);
+ virtual void EmitCFIBKeyFrame();
+
/// This implements the DWARF2 '.loc fileno lineno ...' assembler
/// directive.
virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -867,7 +881,7 @@ public:
virtual MCSymbol *getDwarfLineTableSymbol(unsigned CUID);
virtual void EmitCFISections(bool EH, bool Debug);
- void EmitCFIStartProc(bool IsSimple);
+ void EmitCFIStartProc(bool IsSimple, SMLoc Loc = SMLoc());
void EmitCFIEndProc();
virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
virtual void EmitCFIDefCfaOffset(int64_t Offset);
@@ -888,9 +902,15 @@ public:
virtual void EmitCFIUndefined(int64_t Register);
virtual void EmitCFIRegister(int64_t Register1, int64_t Register2);
virtual void EmitCFIWindowSave();
+ virtual void EmitCFINegateRAState();
virtual void EmitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc = SMLoc());
virtual void EmitWinCFIEndProc(SMLoc Loc = SMLoc());
+ /// This is used on platforms, such as Windows on ARM64, that require function
+ /// or funclet sizes to be emitted in .xdata before the End marker is emitted
+ /// for the frame. We cannot use the End marker, as it is not set at the
+ /// point of emitting .xdata, in order to indicate that the frame is active.
+ virtual void EmitWinCFIFuncletOrFuncEnd(SMLoc Loc = SMLoc());
virtual void EmitWinCFIStartChained(SMLoc Loc = SMLoc());
virtual void EmitWinCFIEndChained(SMLoc Loc = SMLoc());
virtual void EmitWinCFIPushReg(unsigned Register, SMLoc Loc = SMLoc());
diff --git a/contrib/llvm/include/llvm/MC/MCSymbolWasm.h b/contrib/llvm/include/llvm/MC/MCSymbolWasm.h
index e043453dc732..8e66dc881d0f 100644
--- a/contrib/llvm/include/llvm/MC/MCSymbolWasm.h
+++ b/contrib/llvm/include/llvm/MC/MCSymbolWasm.h
@@ -20,12 +20,9 @@ class MCSymbolWasm : public MCSymbol {
bool IsHidden = false;
bool IsComdat = false;
std::string ModuleName;
- SmallVector<wasm::ValType, 1> Returns;
- SmallVector<wasm::ValType, 4> Params;
- wasm::WasmGlobalType GlobalType;
- bool ParamsSet = false;
- bool ReturnsSet = false;
- bool GlobalTypeSet = false;
+ wasm::WasmSignature *Signature = nullptr;
+ Optional<wasm::WasmGlobalType> GlobalType;
+ Optional<wasm::WasmEventType> EventType;
/// An expression describing how to calculate the size of a symbol. If a
/// symbol has no size this field will be NULL.
@@ -35,8 +32,7 @@ public:
// Use a module name of "env" for now, for compatibility with existing tools.
// This is temporary, and may change, as the ABI is not yet stable.
MCSymbolWasm(const StringMapEntry<bool> *Name, bool isTemporary)
- : MCSymbol(SymbolKindWasm, Name, isTemporary),
- ModuleName("env") {}
+ : MCSymbol(SymbolKindWasm, Name, isTemporary), ModuleName("env") {}
static bool classof(const MCSymbol *S) { return S->isWasm(); }
const MCExpr *getSize() const { return SymbolSize; }
@@ -46,6 +42,7 @@ public:
bool isData() const { return Type == wasm::WASM_SYMBOL_TYPE_DATA; }
bool isGlobal() const { return Type == wasm::WASM_SYMBOL_TYPE_GLOBAL; }
bool isSection() const { return Type == wasm::WASM_SYMBOL_TYPE_SECTION; }
+ bool isEvent() const { return Type == wasm::WASM_SYMBOL_TYPE_EVENT; }
wasm::WasmSymbolType getType() const { return Type; }
void setType(wasm::WasmSymbolType type) { Type = type; }
@@ -61,37 +58,22 @@ public:
const StringRef getModuleName() const { return ModuleName; }
void setModuleName(StringRef Name) { ModuleName = Name; }
- const SmallVector<wasm::ValType, 1> &getReturns() const {
- assert(ReturnsSet);
- return Returns;
- }
-
- void setReturns(SmallVectorImpl<wasm::ValType> &&Rets) {
- ReturnsSet = true;
- Returns = std::move(Rets);
- }
-
- const SmallVector<wasm::ValType, 4> &getParams() const {
- assert(ParamsSet);
- return Params;
- }
-
- void setParams(SmallVectorImpl<wasm::ValType> &&Pars) {
- ParamsSet = true;
- Params = std::move(Pars);
- }
+ const wasm::WasmSignature *getSignature() const { return Signature; }
+ void setSignature(wasm::WasmSignature *Sig) { Signature = Sig; }
const wasm::WasmGlobalType &getGlobalType() const {
- assert(GlobalTypeSet);
- return GlobalType;
+ assert(GlobalType.hasValue());
+ return GlobalType.getValue();
}
+ void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; }
- void setGlobalType(wasm::WasmGlobalType GT) {
- GlobalTypeSet = true;
- GlobalType = GT;
+ const wasm::WasmEventType &getEventType() const {
+ assert(EventType.hasValue());
+ return EventType.getValue();
}
+ void setEventType(wasm::WasmEventType ET) { EventType = ET; }
};
-} // end namespace llvm
+} // end namespace llvm
#endif // LLVM_MC_MCSYMBOLWASM_H
diff --git a/contrib/llvm/include/llvm/MC/MCWasmObjectWriter.h b/contrib/llvm/include/llvm/MC/MCWasmObjectWriter.h
index e45030f302ff..6b788cfe96b9 100644
--- a/contrib/llvm/include/llvm/MC/MCWasmObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCWasmObjectWriter.h
@@ -51,6 +51,6 @@ std::unique_ptr<MCObjectWriter>
createWasmObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
raw_pwrite_stream &OS);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCWin64EH.h b/contrib/llvm/include/llvm/MC/MCWin64EH.h
index 83ea738de8c3..1a9f6f403d7c 100644
--- a/contrib/llvm/include/llvm/MC/MCWin64EH.h
+++ b/contrib/llvm/include/llvm/MC/MCWin64EH.h
@@ -56,6 +56,14 @@ public:
void Emit(MCStreamer &Streamer) const override;
void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI) const override;
};
+
+class ARM64UnwindEmitter : public WinEH::UnwindEmitter {
+public:
+ void Emit(MCStreamer &Streamer) const override;
+ void EmitUnwindInfo(MCStreamer &Streamer,
+ WinEH::FrameInfo *FI) const override;
+};
+
}
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCWinEH.h b/contrib/llvm/include/llvm/MC/MCWinEH.h
index 4ca52a6654eb..98ef0367a11d 100644
--- a/contrib/llvm/include/llvm/MC/MCWinEH.h
+++ b/contrib/llvm/include/llvm/MC/MCWinEH.h
@@ -10,6 +10,7 @@
#ifndef LLVM_MC_MCWINEH_H
#define LLVM_MC_MCWINEH_H
+#include "llvm/ADT/MapVector.h"
#include <vector>
namespace llvm {
@@ -20,9 +21,9 @@ class MCSymbol;
namespace WinEH {
struct Instruction {
const MCSymbol *Label;
- const unsigned Offset;
- const unsigned Register;
- const unsigned Operation;
+ unsigned Offset;
+ unsigned Register;
+ unsigned Operation;
Instruction(unsigned Op, MCSymbol *L, unsigned Reg, unsigned Off)
: Label(L), Offset(Off), Register(Reg), Operation(Op) {}
@@ -31,6 +32,7 @@ struct Instruction {
struct FrameInfo {
const MCSymbol *Begin = nullptr;
const MCSymbol *End = nullptr;
+ const MCSymbol *FuncletOrFuncEnd = nullptr;
const MCSymbol *ExceptionHandler = nullptr;
const MCSymbol *Function = nullptr;
const MCSymbol *PrologEnd = nullptr;
@@ -43,6 +45,7 @@ struct FrameInfo {
int LastFrameInst = -1;
const FrameInfo *ChainedParent = nullptr;
std::vector<Instruction> Instructions;
+ MapVector<MCSymbol*, std::vector<Instruction>> EpilogMap;
FrameInfo() = default;
FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel)
diff --git a/contrib/llvm/include/llvm/MCA/Context.h b/contrib/llvm/include/llvm/MCA/Context.h
new file mode 100644
index 000000000000..6b2bee0fdc42
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Context.h
@@ -0,0 +1,69 @@
+//===---------------------------- Context.h ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a class for holding ownership of various simulated
+/// hardware units. A Context also provides a utility routine for constructing
+/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
+/// stages.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_CONTEXT_H
+#define LLVM_MCA_CONTEXT_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/InstrBuilder.h"
+#include "llvm/MCA/Pipeline.h"
+#include "llvm/MCA/SourceMgr.h"
+#include <memory>
+
+namespace llvm {
+namespace mca {
+
+/// This is a convenience struct to hold the parameters necessary for creating
+/// the pre-built "default" out-of-order pipeline.
+struct PipelineOptions {
+ PipelineOptions(unsigned DW, unsigned RFS, unsigned LQS, unsigned SQS,
+ bool NoAlias)
+ : DispatchWidth(DW), RegisterFileSize(RFS), LoadQueueSize(LQS),
+ StoreQueueSize(SQS), AssumeNoAlias(NoAlias) {}
+ unsigned DispatchWidth;
+ unsigned RegisterFileSize;
+ unsigned LoadQueueSize;
+ unsigned StoreQueueSize;
+ bool AssumeNoAlias;
+};
+
+class Context {
+ SmallVector<std::unique_ptr<HardwareUnit>, 4> Hardware;
+ const MCRegisterInfo &MRI;
+ const MCSubtargetInfo &STI;
+
+public:
+ Context(const MCRegisterInfo &R, const MCSubtargetInfo &S) : MRI(R), STI(S) {}
+ Context(const Context &C) = delete;
+ Context &operator=(const Context &C) = delete;
+
+ void addHardwareUnit(std::unique_ptr<HardwareUnit> H) {
+ Hardware.push_back(std::move(H));
+ }
+
+ /// Construct a basic pipeline for simulating an out-of-order pipeline.
+ /// This pipeline consists of Fetch, Dispatch, Execute, and Retire stages.
+ std::unique_ptr<Pipeline> createDefaultPipeline(const PipelineOptions &Opts,
+ InstrBuilder &IB,
+ SourceMgr &SrcMgr);
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_CONTEXT_H
diff --git a/contrib/llvm/include/llvm/MCA/HWEventListener.h b/contrib/llvm/include/llvm/MCA/HWEventListener.h
new file mode 100644
index 000000000000..3b32b2cd6577
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HWEventListener.h
@@ -0,0 +1,156 @@
+//===----------------------- HWEventListener.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the main interface for hardware event listeners.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_HWEVENTLISTENER_H
+#define LLVM_MCA_HWEVENTLISTENER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+// An HWInstructionEvent represents state changes of instructions that
+// listeners might be interested in. Listeners can choose to ignore any event
+// they are not interested in.
+class HWInstructionEvent {
+public:
+ // This is the list of event types that are shared by all targets, that
+ // generic subtarget-agnostic classes (e.g., Pipeline, HWInstructionEvent,
+ // ...) and generic Views can manipulate.
+ // Subtargets are free to define additional event types, that are going to be
+ // handled by generic components as opaque values, but can still be
+ // emitted by subtarget-specific pipeline stages (e.g., ExecuteStage,
+ // DispatchStage, ...) and interpreted by subtarget-specific EventListener
+ // implementations.
+ enum GenericEventType {
+ Invalid = 0,
+ // Events generated by the Retire Control Unit.
+ Retired,
+ // Events generated by the Scheduler.
+ Ready,
+ Issued,
+ Executed,
+ // Events generated by the Dispatch logic.
+ Dispatched,
+
+ LastGenericEventType,
+ };
+
+ HWInstructionEvent(unsigned type, const InstRef &Inst)
+ : Type(type), IR(Inst) {}
+
+ // The event type. The exact meaning depends on the subtarget.
+ const unsigned Type;
+
+ // The instruction this event was generated for.
+ const InstRef &IR;
+};
+
+class HWInstructionIssuedEvent : public HWInstructionEvent {
+public:
+ using ResourceRef = std::pair<uint64_t, uint64_t>;
+ HWInstructionIssuedEvent(const InstRef &IR,
+ ArrayRef<std::pair<ResourceRef, ResourceCycles>> UR)
+ : HWInstructionEvent(HWInstructionEvent::Issued, IR), UsedResources(UR) {}
+
+ ArrayRef<std::pair<ResourceRef, ResourceCycles>> UsedResources;
+};
+
+class HWInstructionDispatchedEvent : public HWInstructionEvent {
+public:
+ HWInstructionDispatchedEvent(const InstRef &IR, ArrayRef<unsigned> Regs,
+ unsigned UOps)
+ : HWInstructionEvent(HWInstructionEvent::Dispatched, IR),
+ UsedPhysRegs(Regs), MicroOpcodes(UOps) {}
+ // Number of physical register allocated for this instruction. There is one
+ // entry per register file.
+ ArrayRef<unsigned> UsedPhysRegs;
+ // Number of micro opcodes dispatched.
+ // This field is often set to the total number of micro-opcodes specified by
+ // the instruction descriptor of IR.
+ // The only exception is when IR declares a number of micro opcodes
+ // which exceeds the processor DispatchWidth, and - by construction - it
+ // requires multiple cycles to be fully dispatched. In that particular case,
+ // the dispatch logic would generate more than one dispatch event (one per
+ // cycle), and each event would declare how many micro opcodes are effectively
+ // been dispatched to the schedulers.
+ unsigned MicroOpcodes;
+};
+
+class HWInstructionRetiredEvent : public HWInstructionEvent {
+public:
+ HWInstructionRetiredEvent(const InstRef &IR, ArrayRef<unsigned> Regs)
+ : HWInstructionEvent(HWInstructionEvent::Retired, IR),
+ FreedPhysRegs(Regs) {}
+ // Number of register writes that have been architecturally committed. There
+ // is one entry per register file.
+ ArrayRef<unsigned> FreedPhysRegs;
+};
+
+// A HWStallEvent represents a pipeline stall caused by the lack of hardware
+// resources.
+class HWStallEvent {
+public:
+ enum GenericEventType {
+ Invalid = 0,
+ // Generic stall events generated by the DispatchStage.
+ RegisterFileStall,
+ RetireControlUnitStall,
+ // Generic stall events generated by the Scheduler.
+ DispatchGroupStall,
+ SchedulerQueueFull,
+ LoadQueueFull,
+ StoreQueueFull,
+ LastGenericEvent
+ };
+
+ HWStallEvent(unsigned type, const InstRef &Inst) : Type(type), IR(Inst) {}
+
+ // The exact meaning of the stall event type depends on the subtarget.
+ const unsigned Type;
+
+ // The instruction this event was generated for.
+ const InstRef &IR;
+};
+
+class HWEventListener {
+public:
+ // Generic events generated by the pipeline.
+ virtual void onCycleBegin() {}
+ virtual void onCycleEnd() {}
+
+ virtual void onEvent(const HWInstructionEvent &Event) {}
+ virtual void onEvent(const HWStallEvent &Event) {}
+
+ using ResourceRef = std::pair<uint64_t, uint64_t>;
+ virtual void onResourceAvailable(const ResourceRef &RRef) {}
+
+ // Events generated by the Scheduler when buffered resources are
+ // consumed/freed for an instruction.
+ virtual void onReservedBuffers(const InstRef &Inst,
+ ArrayRef<unsigned> Buffers) {}
+ virtual void onReleasedBuffers(const InstRef &Inst,
+ ArrayRef<unsigned> Buffers) {}
+
+ virtual ~HWEventListener() {}
+
+private:
+ virtual void anchor();
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_HWEVENTLISTENER_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/HardwareUnit.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/HardwareUnit.h
new file mode 100644
index 000000000000..104a2009f219
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/HardwareUnit.h
@@ -0,0 +1,33 @@
+//===-------------------------- HardwareUnit.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a base class for describing a simulated hardware
+/// unit. These units are used to construct a simulated backend.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_HARDWAREUNIT_H
+#define LLVM_MCA_HARDWAREUNIT_H
+
+namespace llvm {
+namespace mca {
+
+class HardwareUnit {
+ HardwareUnit(const HardwareUnit &H) = delete;
+ HardwareUnit &operator=(const HardwareUnit &H) = delete;
+
+public:
+ HardwareUnit() = default;
+ virtual ~HardwareUnit();
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_HARDWAREUNIT_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h
new file mode 100644
index 000000000000..e217fc50f780
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h
@@ -0,0 +1,207 @@
+//===------------------------- LSUnit.h --------------------------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A Load/Store unit class that models load/store queues and that implements
+/// a simple weak memory consistency model.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_LSUNIT_H
+#define LLVM_MCA_LSUNIT_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+
+namespace llvm {
+namespace mca {
+
+class InstRef;
+class Scheduler;
+
+/// A Load/Store Unit implementing load and store queues.
+///
+/// This class implements a load queue and a store queue to emulate the
+/// out-of-order execution of memory operations.
+/// Each load (or store) consumes an entry in the load (or store) queue.
+///
+/// Rules are:
+/// 1) A younger load is allowed to pass an older load only if there are no
+/// stores nor barriers in between the two loads.
+/// 2) A younger store is not allowed to pass an older store.
+/// 3) A younger store is not allowed to pass an older load.
+/// 4) A younger load is allowed to pass an older store only if the load does
+/// not alias with the store.
+///
+/// This class optimistically assumes that loads don't alias store operations.
+/// Under this assumption, younger loads are always allowed to pass older
+/// stores (this would only affect rule 4).
+/// Essentially, this class doesn't perform any sort of alias analysis to
+/// identify aliasing loads and stores.
+///
+/// To enforce aliasing between loads and stores, flag `AssumeNoAlias` must be
+/// set to `false` by the constructor of LSUnit.
+///
+/// Note that this class doesn't know about the existence of different memory
+/// types for memory operations (example: write-through, write-combining, etc.).
+/// Derived classes are responsible for implementing that extra knowledge, and
+/// provide different sets of rules for loads and stores by overriding method
+/// `isReady()`.
+/// To emulate a write-combining memory type, rule 2. must be relaxed in a
+/// derived class to enable the reordering of non-aliasing store operations.
+///
+/// No assumptions are made by this class on the size of the store buffer. This
+/// class doesn't know how to identify cases where store-to-load forwarding may
+/// occur.
+///
+/// LSUnit doesn't attempt to predict whether a load or store hits or misses
+/// the L1 cache. To be more specific, LSUnit doesn't know anything about
+/// cache hierarchy and memory types.
+/// It only knows if an instruction "mayLoad" and/or "mayStore". For loads, the
+/// scheduling model provides an "optimistic" load-to-use latency (which usually
+/// matches the load-to-use latency for when there is a hit in the L1D).
+/// Derived classes may expand this knowledge.
+///
+/// Class MCInstrDesc in LLVM doesn't know about serializing operations, nor
+/// memory-barrier like instructions.
+/// LSUnit conservatively assumes that an instruction which `mayLoad` and has
+/// `unmodeled side effects` behaves like a "soft" load-barrier. That means, it
+/// serializes loads without forcing a flush of the load queue.
+/// Similarly, instructions that both `mayStore` and have `unmodeled side
+/// effects` are treated like store barriers. A full memory
+/// barrier is a 'mayLoad' and 'mayStore' instruction with unmodeled side
+/// effects. This is obviously inaccurate, but this is the best that we can do
+/// at the moment.
+///
+/// Each load/store barrier consumes one entry in the load/store queue. A
+/// load/store barrier enforces ordering of loads/stores:
+/// - A younger load cannot pass a load barrier.
+/// - A younger store cannot pass a store barrier.
+///
+/// A younger load has to wait for the memory load barrier to execute.
+/// A load/store barrier is "executed" when it becomes the oldest entry in
+/// the load/store queue(s). That also means, all the older loads/stores have
+/// already been executed.
+class LSUnit : public HardwareUnit {
+ // Load queue size.
+ // LQ_Size == 0 means that there are infinite slots in the load queue.
+ unsigned LQ_Size;
+
+ // Store queue size.
+ // SQ_Size == 0 means that there are infinite slots in the store queue.
+ unsigned SQ_Size;
+
+ // If true, loads will never alias with stores. This is the default.
+ bool NoAlias;
+
+ // When a `MayLoad` instruction is dispatched to the schedulers for execution,
+ // the LSUnit reserves an entry in the `LoadQueue` for it.
+ //
+ // LoadQueue keeps track of all the loads that are in-flight. A load
+ // instruction is eventually removed from the LoadQueue when it reaches
+ // completion stage. That means, a load leaves the queue when it is 'executed',
+ // and its value can be forwarded on the data path to outside units.
+ //
+ // This class doesn't know about the latency of a load instruction. So, it
+ // conservatively/pessimistically assumes that the latency of a load opcode
+ // matches the instruction latency.
+ //
+ // FIXME: In the absence of cache misses (i.e. L1I/L1D/iTLB/dTLB hits/misses),
+ // and load/store conflicts, the latency of a load is determined by the depth
+ // of the load pipeline. So, we could use field `LoadLatency` in the
+ // MCSchedModel to model that latency.
+ // Field `LoadLatency` often matches the so-called 'load-to-use' latency from
+ // L1D, and it usually already accounts for any extra latency due to data
+ // forwarding.
+ // When doing throughput analysis, `LoadLatency` is likely to
+ // be a better predictor of load latency than instruction latency. This is
+ // particularly true when simulating code with temporal/spatial locality of
+ // memory accesses.
+ // Using `LoadLatency` (instead of the instruction latency) is also expected
+ // to improve the load queue allocation for long latency instructions with
+ // folded memory operands (See PR39829).
+ //
+ // FIXME: On some processors, load/store operations are split into multiple
+ // uOps. For example, X86 AMD Jaguar natively supports 128-bit data types, but
+ // not 256-bit data types. So, a 256-bit load is effectively split into two
+ // 128-bit loads, and each split load consumes one 'LoadQueue' entry. For
+ // simplicity, this class optimistically assumes that a load instruction only
+ // consumes one entry in the LoadQueue. Similarly, store instructions only
+ // consume a single entry in the StoreQueue.
+ // In future, we should reassess the quality of this design, and consider
+ // alternative approaches that let instructions specify the number of
+ // load/store queue entries which they consume at dispatch stage (See
+ // PR39830).
+ SmallSet<unsigned, 16> LoadQueue;
+ SmallSet<unsigned, 16> StoreQueue;
+
+ void assignLQSlot(unsigned Index);
+ void assignSQSlot(unsigned Index);
+ bool isReadyNoAlias(unsigned Index) const;
+
+ // An instruction that both 'mayStore' and 'HasUnmodeledSideEffects' is
+ // conservatively treated as a store barrier. It forces older stores to be
+ // executed before newer stores are issued.
+ SmallSet<unsigned, 8> StoreBarriers;
+
+ // An instruction that both 'MayLoad' and 'HasUnmodeledSideEffects' is
+ // conservatively treated as a load barrier. It forces older loads to execute
+ // before newer loads are issued.
+ SmallSet<unsigned, 8> LoadBarriers;
+
+ bool isSQEmpty() const { return StoreQueue.empty(); }
+ bool isLQEmpty() const { return LoadQueue.empty(); }
+ bool isSQFull() const { return SQ_Size != 0 && StoreQueue.size() == SQ_Size; }
+ bool isLQFull() const { return LQ_Size != 0 && LoadQueue.size() == LQ_Size; }
+
+public:
+ LSUnit(const MCSchedModel &SM, unsigned LQ = 0, unsigned SQ = 0,
+ bool AssumeNoAlias = false);
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+
+ enum Status { LSU_AVAILABLE = 0, LSU_LQUEUE_FULL, LSU_SQUEUE_FULL };
+
+ // Returns LSU_AVAILABLE if there are enough load/store queue entries to serve
+ // IR. It also returns LSU_AVAILABLE if IR is not a memory operation.
+ Status isAvailable(const InstRef &IR) const;
+
+ // Allocates load/store queue resources for IR.
+ //
+ // This method assumes that a previous call to `isAvailable(IR)` returned
+ // LSU_AVAILABLE, and that IR is a memory operation.
+ void dispatch(const InstRef &IR);
+
+ // By default, rules are:
+ // 1. A store may not pass a previous store.
+ // 2. A load may not pass a previous store unless flag 'NoAlias' is set.
+ // 3. A load may pass a previous load.
+ // 4. A store may not pass a previous load (regardless of flag 'NoAlias').
+ // 5. A load has to wait until an older load barrier is fully executed.
+ // 6. A store has to wait until an older store barrier is fully executed.
+ virtual bool isReady(const InstRef &IR) const;
+
+ // Load and store instructions are tracked by their corresponding queues from
+ // dispatch until the "instruction executed" event.
+ // Only when a load instruction reaches the 'Executed' stage, its value
+ // becomes available to the users. At that point, the load no longer needs to
+ // be tracked by the load queue.
+ // FIXME: For simplicity, we optimistically assume a similar behavior for
+ // store instructions. In practice, store operations don't tend to leave the
+ // store queue until they reach the 'Retired' stage (See PR39830).
+ void onInstructionExecuted(const InstRef &IR);
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_LSUNIT_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/RegisterFile.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/RegisterFile.h
new file mode 100644
index 000000000000..c23ab0389234
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/RegisterFile.h
@@ -0,0 +1,239 @@
+//===--------------------- RegisterFile.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a register mapping file class. This class is responsible
+/// for managing hardware register files and the tracking of data dependencies
+/// between registers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_REGISTER_FILE_H
+#define LLVM_MCA_REGISTER_FILE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+class ReadState;
+class WriteState;
+class WriteRef;
+
+/// Manages hardware register files, and tracks register definitions for
+/// register renaming purposes.
+class RegisterFile : public HardwareUnit {
+ const MCRegisterInfo &MRI;
+
+ // class RegisterMappingTracker is a physical register file (PRF) descriptor.
+ // There is one RegisterMappingTracker for every PRF definition in the
+ // scheduling model.
+ //
+ // An instance of RegisterMappingTracker tracks the number of physical
+ // registers available for renaming. It also tracks the number of register
+ // moves eliminated per cycle.
+ struct RegisterMappingTracker {
+ // The total number of physical registers that are available in this
+ // register file for register renaming purposes. A value of zero for this
+ // field means: this register file has an unbounded number of physical
+ // registers.
+ const unsigned NumPhysRegs;
+ // Number of physical registers that are currently in use.
+ unsigned NumUsedPhysRegs;
+
+ // Maximum number of register moves that can be eliminated by this PRF every
+ // cycle. A value of zero means that there is no limit in the number of
+ // moves which can be eliminated every cycle.
+ const unsigned MaxMoveEliminatedPerCycle;
+
+ // Number of register moves eliminated during this cycle.
+ //
+ // This value is increased by one every time a register move is eliminated.
+ // Every new cycle, this value is reset to zero.
+ // A move can be eliminated only if MaxMoveEliminatedPerCycle is zero, or if
+ // NumMoveEliminated is less than MaxMoveEliminatedPerCycle.
+ unsigned NumMoveEliminated;
+
+ // If set, move elimination is restricted to zero-register moves only.
+ bool AllowZeroMoveEliminationOnly;
+
+ RegisterMappingTracker(unsigned NumPhysRegisters,
+ unsigned MaxMoveEliminated = 0U,
+ bool AllowZeroMoveElimOnly = false)
+ : NumPhysRegs(NumPhysRegisters), NumUsedPhysRegs(0),
+ MaxMoveEliminatedPerCycle(MaxMoveEliminated), NumMoveEliminated(0U),
+ AllowZeroMoveEliminationOnly(AllowZeroMoveElimOnly) {}
+ };
+
+ // A vector of register file descriptors. This set always contains at least
+ // one entry. Entry at index #0 is reserved. That entry describes a register
+ // file with an unbounded number of physical registers that "sees" all the
+ // hardware registers declared by the target (i.e. all the register
+ // definitions in the target specific `XYZRegisterInfo.td` - where `XYZ` is
+ // the target name).
+ //
+ // Users can limit the number of physical registers that are available in
+ // register file #0 specifying command line flag `-register-file-size=<uint>`.
+ SmallVector<RegisterMappingTracker, 4> RegisterFiles;
+
+ // This type is used to propagate information about the owner of a register,
+ // and the cost of allocating it in the PRF. Register cost is defined as the
+ // number of physical registers consumed by the PRF to allocate a user
+ // register.
+ //
+ // For example: on X86 BtVer2, a YMM register consumes 2 128-bit physical
+ // registers. So, the cost of allocating a YMM register in BtVer2 is 2.
+ using IndexPlusCostPairTy = std::pair<unsigned, unsigned>;
+
+ // Struct RegisterRenamingInfo is used to map logical registers to register
+ // files.
+ //
+ // There is a RegisterRenamingInfo object for every logical register defined
+ // by the target. RegisterRenamingInfo objects are stored into vector
+ // `RegisterMappings`, and MCPhysReg IDs can be used to reference
+ // elements in that vector.
+ //
+ // Each RegisterRenamingInfo is owned by a PRF, and field `IndexPlusCost`
+ // specifies both the owning PRF, as well as the number of physical registers
+ // consumed at register renaming stage.
+ //
+ // Field `AllowMoveElimination` is set for registers that are used as
+ // destination by optimizable register moves.
+ //
+ // Field `AliasRegID` is set by writes from register moves that have been
+ // eliminated at register renaming stage. A move eliminated at register
+ // renaming stage is effectively bypassed, and its write aliases the source
+ // register definition.
+ struct RegisterRenamingInfo {
+ IndexPlusCostPairTy IndexPlusCost;
+ MCPhysReg RenameAs;
+ MCPhysReg AliasRegID;
+ bool AllowMoveElimination;
+ RegisterRenamingInfo()
+ : IndexPlusCost(std::make_pair(0U, 1U)), RenameAs(0U), AliasRegID(0U),
+ AllowMoveElimination(false) {}
+ };
+
+ // RegisterMapping objects are mainly used to track physical register
+ // definitions and resolve data dependencies.
+ //
+ // Every register declared by the Target is associated with an instance of
+ // RegisterMapping. RegisterMapping objects keep track of writes to a logical
+ // register. That information is used by class RegisterFile to resolve data
+ // dependencies, and correctly set latencies for register uses.
+ //
+ // This implementation does not allow overlapping register files. The only
+ // register file that is allowed to overlap with other register files is
+ // register file #0. If we exclude register #0, every register is "owned" by
+ // at most one register file.
+ using RegisterMapping = std::pair<WriteRef, RegisterRenamingInfo>;
+
+ // There is one entry per each register defined by the target.
+ std::vector<RegisterMapping> RegisterMappings;
+
+ // Used to track zero registers. There is one bit for each register defined by
+ // the target. Bits are set for registers that are known to be zero.
+ APInt ZeroRegisters;
+
+ // This method creates a new register file descriptor.
+ // The new register file owns all of the registers declared by register
+ // classes in the 'RegisterClasses' set.
+ //
+ // Processor models allow the definition of RegisterFile(s) via tablegen. For
+ // example, this is a tablegen definition for a x86 register file for
+ // XMM[0-15] and YMM[0-15], that allows up to 60 renames (each rename costs 1
+ // physical register).
+ //
+ // def FPRegisterFile : RegisterFile<60, [VR128RegClass, VR256RegClass]>
+ //
+ // Here FPRegisterFile contains all the registers defined by register class
+ // VR128RegClass and VR256RegClass. FPRegisterFile implements 60
+ // registers which can be used for register renaming purpose.
+ void addRegisterFile(const MCRegisterFileDesc &RF,
+ ArrayRef<MCRegisterCostEntry> Entries);
+
+ // Consumes physical registers in each register file specified by the
+ // `IndexPlusCostPairTy`. This method is called from `addRegisterMapping()`.
+ void allocatePhysRegs(const RegisterRenamingInfo &Entry,
+ MutableArrayRef<unsigned> UsedPhysRegs);
+
+ // Releases previously allocated physical registers from the register file(s).
+ // This method is called from `invalidateRegisterMapping()`.
+ void freePhysRegs(const RegisterRenamingInfo &Entry,
+ MutableArrayRef<unsigned> FreedPhysRegs);
+
+ // Collects writes that are in a RAW dependency with RS.
+ // This method is called from `addRegisterRead()`.
+ void collectWrites(const ReadState &RS,
+ SmallVectorImpl<WriteRef> &Writes) const;
+
+ // Create an instance of RegisterMappingTracker for every register file
+ // specified by the processor model.
+ // If no register file is specified, then this method creates a default
+ // register file with an unbounded number of physical registers.
+ void initialize(const MCSchedModel &SM, unsigned NumRegs);
+
+public:
+ RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
+ unsigned NumRegs = 0);
+
+ // This method updates the register mappings inserting a new register
+ // definition. This method is also responsible for updating the number of
+ // allocated physical registers in each register file modified by the write.
+ // No physical register is allocated if this write is from a zero-idiom.
+ void addRegisterWrite(WriteRef Write, MutableArrayRef<unsigned> UsedPhysRegs);
+
+ // Collect writes that are in a data dependency with RS, and update RS
+ // internal state.
+ void addRegisterRead(ReadState &RS, SmallVectorImpl<WriteRef> &Writes) const;
+
+ // Removes write \param WS from the register mappings.
+ // Physical registers may be released to reflect this update.
+ // No registers are released if this write is from a zero-idiom.
+ void removeRegisterWrite(const WriteState &WS,
+ MutableArrayRef<unsigned> FreedPhysRegs);
+
+ // Returns true if a move from RS to WS can be eliminated.
+ // On success, it updates WriteState by setting flag `WS.isEliminated`.
+ // If RS is a read from a zero register, and WS is eliminated, then
+ // `WS.WritesZero` is also set, so that method addRegisterWrite() would not
+ // reserve a physical register for it.
+ bool tryEliminateMove(WriteState &WS, ReadState &RS);
+
+ // Checks if there are enough physical registers in the register files.
+ // Returns a "response mask" where each bit represents the response from a
+ // different register file. A mask of all zeroes means that all register
+ // files are available. Otherwise, the mask can be used to identify which
+ // register file was busy. This semantic allows us to classify dispatch
+ // stalls caused by the lack of register file resources.
+ //
+ // Current implementation can simulate up to 32 register files (including the
+ // special register file at index #0).
+ unsigned isAvailable(ArrayRef<unsigned> Regs) const;
+
+ // Returns the number of PRFs implemented by this processor.
+ unsigned getNumRegisterFiles() const { return RegisterFiles.size(); }
+
+ // Notify each PRF that a new cycle just started.
+ void cycleStart();
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_REGISTER_FILE_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h
new file mode 100644
index 000000000000..549a46c247fe
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h
@@ -0,0 +1,410 @@
+//===--------------------- ResourceManager.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The classes here represent processor resource units and their management
+/// strategy. These classes are managed by the Scheduler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RESOURCE_MANAGER_H
+#define LLVM_MCA_RESOURCE_MANAGER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+/// Used to notify the internal state of a processor resource.
+///
+/// A processor resource is available if it is not reserved, and there are
+/// available slots in the buffer. A processor resource is unavailable if it
+/// is either reserved, or the associated buffer is full. A processor resource
+/// with a buffer size of -1 is always available if it is not reserved.
+///
+/// Values of type ResourceStateEvent are returned by method
+/// ResourceState::isBufferAvailable(), which is used to query the internal
+/// state of a resource.
+///
+/// The naming convention for resource state events is:
+/// * Event names start with prefix RS_
+/// * Prefix RS_ is followed by a string describing the actual resource state.
+enum ResourceStateEvent {
+ RS_BUFFER_AVAILABLE,
+ RS_BUFFER_UNAVAILABLE,
+ RS_RESERVED
+};
+
+/// Resource allocation strategy used by hardware scheduler resources.
+class ResourceStrategy {
+ ResourceStrategy(const ResourceStrategy &) = delete;
+ ResourceStrategy &operator=(const ResourceStrategy &) = delete;
+
+public:
+ ResourceStrategy() {}
+ virtual ~ResourceStrategy();
+
+ /// Selects a processor resource unit from a ReadyMask.
+ virtual uint64_t select(uint64_t ReadyMask) = 0;
+
+ /// Called by the ResourceManager when a processor resource group, or a
+ /// processor resource with multiple units has become unavailable.
+ ///
+ /// The default strategy uses this information to bias its selection logic.
+ virtual void used(uint64_t ResourceMask) {}
+};
+
+/// Default resource allocation strategy used by processor resource groups and
+/// processor resources with multiple units.
+class DefaultResourceStrategy final : public ResourceStrategy {
+ /// A Mask of resource unit identifiers.
+ ///
+ /// There is one bit set for every available resource unit.
+ /// It defaults to the value of field ResourceSizeMask in ResourceState.
+ const uint64_t ResourceUnitMask;
+
+ /// A simple round-robin selector for processor resource units.
+ /// Each bit of this mask identifies a sub resource within a group.
+ ///
+ /// As an example, lets assume that this is a default policy for a
+ /// processor resource group composed by the following three units:
+ /// ResourceA -- 0b001
+ /// ResourceB -- 0b010
+ /// ResourceC -- 0b100
+ ///
+ /// Field NextInSequenceMask is used to select the next unit from the set of
+ /// resource units. It defaults to the value of field `ResourceUnitMask` (in
+ /// this example, it defaults to mask '0b111').
+ ///
+ /// The round-robin selector would firstly select 'ResourceC', then
+ /// 'ResourceB', and eventually 'ResourceA'. When a resource R is used, the
+ /// corresponding bit in NextInSequenceMask is cleared. For example, if
+ /// 'ResourceC' is selected, then the new value of NextInSequenceMask becomes
+ /// 0b011.
+ ///
+ /// When NextInSequenceMask becomes zero, it is automatically reset to the
+ /// default value (i.e. ResourceUnitMask).
+ uint64_t NextInSequenceMask;
+
+ /// This field is used to track resource units that are used (i.e. selected)
+ /// by other groups other than the one associated with this strategy object.
+ ///
+ /// In LLVM processor resource groups are allowed to partially (or fully)
+ /// overlap. That means, a same unit may be visible to multiple groups.
+ /// This field keeps track of uses that have originated from outside of
+ /// this group. The idea is to bias the selection strategy, so that resources
+ /// that haven't been used by other groups get prioritized.
+ ///
+ /// The end goal is to (try to) keep the resource distribution as much uniform
+ /// as possible. By construction, this mask only tracks one-level of resource
+ /// usage. Therefore, this strategy is expected to be less accurate when same
+ /// units are used multiple times by other groups within a single round of
+ /// select.
+ ///
+ /// Note: an LRU selector would have a better accuracy at the cost of being
+ /// slightly more expensive (mostly in terms of runtime cost). Methods
+ /// 'select' and 'used', are always in the hot execution path of llvm-mca.
+ /// Therefore, a slow implementation of 'select' would have a negative impact
+ /// on the overall performance of the tool.
+ uint64_t RemovedFromNextInSequence;
+
+public:
+ DefaultResourceStrategy(uint64_t UnitMask)
+ : ResourceStrategy(), ResourceUnitMask(UnitMask),
+ NextInSequenceMask(UnitMask), RemovedFromNextInSequence(0) {}
+ virtual ~DefaultResourceStrategy() = default;
+
+ uint64_t select(uint64_t ReadyMask) override;
+ void used(uint64_t Mask) override;
+};
+
+/// A processor resource descriptor.
+///
+/// There is an instance of this class for every processor resource defined by
+/// the machine scheduling model.
+/// Objects of class ResourceState dynamically track the usage of processor
+/// resource units.
+class ResourceState {
+ /// An index to the MCProcResourceDesc entry in the processor model.
+ const unsigned ProcResourceDescIndex;
+ /// A resource mask. This is generated by the tool with the help of
+ /// function `mca::computeProcResourceMasks' (see Support.h).
+ ///
+ /// Field ResourceMask only has one bit set if this resource state describes a
+ /// processor resource unit (i.e. this is not a group). That means, we can
+ /// quickly check if a resource is a group by simply counting the number of
+ /// bits that are set in the mask.
+ ///
+ /// The most significant bit of a mask (MSB) uniquely identifies a resource.
+ /// Remaining bits are used to describe the composition of a group (Group).
+ ///
+ /// Example (little endian):
+ /// Resource | Mask | MSB | Group
+ /// ---------+------------+------------+------------
+ /// A | 0b000001 | 0b000001 | 0b000000
+ /// | | |
+ /// B | 0b000010 | 0b000010 | 0b000000
+ /// | | |
+ /// C | 0b010000 | 0b010000 | 0b000000
+ /// | | |
+ /// D | 0b110010 | 0b100000 | 0b010010
+ ///
+ /// In this example, resources A, B and C are processor resource units.
+ /// Only resource D is a group resource, and it contains resources B and C.
+ /// That is because MSB(B) and MSB(C) are both contained within Group(D).
+ const uint64_t ResourceMask;
+
+ /// A ProcResource can have multiple units.
+ ///
+ /// For processor resource groups this field is a mask of contained resource
+ /// units. It is obtained from ResourceMask by clearing the highest set bit.
+ /// The number of resource units in a group can be simply computed as the
+ /// population count of this field.
+ ///
+ /// For normal (i.e. non-group) resources, the number of bits set in this mask
+ /// is equivalent to the number of units declared by the processor model (see
+ /// field 'NumUnits' in 'ProcResourceUnits').
+ uint64_t ResourceSizeMask;
+
+ /// A mask of ready units.
+ uint64_t ReadyMask;
+
+ /// Buffered resources will have this field set to a positive number different
+ /// than zero. A buffered resource behaves like a reservation station
+ /// implementing its own buffer for out-of-order execution.
+ ///
+ /// A BufferSize of 1 is used by scheduler resources that force in-order
+ /// execution.
+ ///
+ /// A BufferSize of 0 is used to model in-order issue/dispatch resources.
+ /// Since in-order issue/dispatch resources don't implement buffers, dispatch
+ /// events coincide with issue events.
+ /// Also, no other instruction can be dispatched/issued while this resource is
+ /// in use. Only when all the "resource cycles" are consumed (after the issue
+ /// event), a new instruction can be dispatched.
+ const int BufferSize;
+
+ /// Available slots in the buffer (zero, if this is not a buffered resource).
+ unsigned AvailableSlots;
+
+ /// This field is set if this resource is currently reserved.
+ ///
+ /// Resources can be reserved for a number of cycles.
+ /// Instructions can still be dispatched to reserved resources. However,
+ /// instructions dispatched to a reserved resource cannot be issued to the
+ /// underlying units (i.e. pipelines) until the resource is released.
+ bool Unavailable;
+
+ const bool IsAGroup;
+
+ /// Checks for the availability of unit 'SubResMask' in the group.
+ bool isSubResourceReady(uint64_t SubResMask) const {
+ return ReadyMask & SubResMask;
+ }
+
+public:
+ ResourceState(const MCProcResourceDesc &Desc, unsigned Index, uint64_t Mask);
+
+ unsigned getProcResourceID() const { return ProcResourceDescIndex; }
+ uint64_t getResourceMask() const { return ResourceMask; }
+ uint64_t getReadyMask() const { return ReadyMask; }
+ int getBufferSize() const { return BufferSize; }
+
+ bool isBuffered() const { return BufferSize > 0; }
+ bool isInOrder() const { return BufferSize == 1; }
+
+ /// Returns true if this is an in-order dispatch/issue resource.
+ bool isADispatchHazard() const { return BufferSize == 0; }
+ bool isReserved() const { return Unavailable; }
+
+ void setReserved() { Unavailable = true; }
+ void clearReserved() { Unavailable = false; }
+
+ /// Returns true if this resource is not reserved, and if there are at least
+ /// `NumUnits` available units.
+ bool isReady(unsigned NumUnits = 1) const;
+
+ bool isAResourceGroup() const { return IsAGroup; }
+
+ bool containsResource(uint64_t ID) const { return ResourceMask & ID; }
+
+ void markSubResourceAsUsed(uint64_t ID) {
+ assert(isSubResourceReady(ID));
+ ReadyMask ^= ID;
+ }
+
+ void releaseSubResource(uint64_t ID) {
+ assert(!isSubResourceReady(ID));
+ ReadyMask ^= ID;
+ }
+
+ unsigned getNumUnits() const {
+ return isAResourceGroup() ? 1U : countPopulation(ResourceSizeMask);
+ }
+
+ /// Checks if there is an available slot in the resource buffer.
+ ///
+ /// Returns RS_BUFFER_AVAILABLE if this is not a buffered resource, or if
+ /// there is a slot available.
+ ///
+ /// Returns RS_RESERVED if this buffered resource is a dispatch hazard, and it
+ /// is reserved.
+ ///
+ /// Returns RS_BUFFER_UNAVAILABLE if there are no available slots.
+ ResourceStateEvent isBufferAvailable() const;
+
+ /// Reserve a slot in the buffer.
+ void reserveBuffer() {
+ if (AvailableSlots)
+ AvailableSlots--;
+ }
+
+ /// Release a slot in the buffer.
+ void releaseBuffer() {
+ if (BufferSize > 0)
+ AvailableSlots++;
+ assert(AvailableSlots <= static_cast<unsigned>(BufferSize));
+ }
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+/// A resource unit identifier.
+///
+/// This is used to identify a specific processor resource unit using a pair
+/// of indices where the 'first' index is a processor resource mask, and the
+/// 'second' index is an index for a "sub-resource" (i.e. unit).
+typedef std::pair<uint64_t, uint64_t> ResourceRef;
+
+// First: a MCProcResourceDesc index identifying a buffered resource.
+// Second: max number of buffer entries used in this resource.
+typedef std::pair<unsigned, unsigned> BufferUsageEntry;
+
+/// A resource manager for processor resource units and groups.
+///
+/// This class owns all the ResourceState objects, and it is responsible for
+/// acting on requests from a Scheduler by updating the internal state of
+/// ResourceState objects.
+/// This class doesn't know about instruction itineraries and functional units.
+/// In future, it can be extended to support itineraries too through the same
+/// public interface.
+class ResourceManager {
+ // Set of resources available on the subtarget.
+ //
+ // There is an instance of ResourceState for every resource declared by the
+ // target scheduling model.
+ //
+ // Elements of this vector are ordered by resource kind. In particular,
+ // resource units take precedence over resource groups.
+ //
+ // The index of a processor resource in this vector depends on the value of
+ // its mask (see the description of field ResourceState::ResourceMask). In
+ // particular, it is computed as the position of the most significant bit set
+ // (MSB) in the mask plus one (since we want to ignore the invalid resource
+ // descriptor at index zero).
+ //
+ // Example (little endian):
+ //
+ // Resource | Mask | MSB | Index
+ // ---------+---------+---------+-------
+ // A | 0b00001 | 0b00001 | 1
+ // | | |
+ // B | 0b00100 | 0b00100 | 3
+ // | | |
+ // C | 0b10010 | 0b10000 | 5
+ //
+ //
+ // The same index is also used to address elements within vector `Strategies`
+ // and vector `Resource2Groups`.
+ std::vector<std::unique_ptr<ResourceState>> Resources;
+ std::vector<std::unique_ptr<ResourceStrategy>> Strategies;
+
+ // Used to quickly identify groups that own a particular resource unit.
+ std::vector<uint64_t> Resource2Groups;
+
+ // A table to map processor resource IDs to processor resource masks.
+ SmallVector<uint64_t, 8> ProcResID2Mask;
+
+ // Keeps track of which resources are busy, and how many cycles are left
+ // before those become usable again.
+ SmallDenseMap<ResourceRef, unsigned> BusyResources;
+
+ // Returns the actual resource unit that will be used.
+ ResourceRef selectPipe(uint64_t ResourceID);
+
+ void use(const ResourceRef &RR);
+ void release(const ResourceRef &RR);
+
+ unsigned getNumUnits(uint64_t ResourceID) const;
+
+ // Overrides the selection strategy for the processor resource with the given
+ // mask.
+ void setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
+ uint64_t ResourceMask);
+
+public:
+ ResourceManager(const MCSchedModel &SM);
+ virtual ~ResourceManager() = default;
+
+ // Overrides the selection strategy for the resource at index ResourceID in
+ // the MCProcResourceDesc table.
+ void setCustomStrategy(std::unique_ptr<ResourceStrategy> S,
+ unsigned ResourceID) {
+ assert(ResourceID < ProcResID2Mask.size() &&
+ "Invalid resource index in input!");
+ return setCustomStrategyImpl(std::move(S), ProcResID2Mask[ResourceID]);
+ }
+
+ // Returns RS_BUFFER_AVAILABLE if buffered resources are not reserved, and if
+ // there are enough available slots in the buffers.
+ ResourceStateEvent canBeDispatched(ArrayRef<uint64_t> Buffers) const;
+
+ // Return the processor resource identifier associated to this Mask.
+ unsigned resolveResourceMask(uint64_t Mask) const;
+
+ // Consume a slot in every buffered resource from array 'Buffers'. Resource
+ // units that are dispatch hazards (i.e. BufferSize=0) are marked as reserved.
+ void reserveBuffers(ArrayRef<uint64_t> Buffers);
+
+ // Release buffer entries previously allocated by method reserveBuffers.
+ void releaseBuffers(ArrayRef<uint64_t> Buffers);
+
+ // Reserve a processor resource. A reserved resource is not available for
+ // instruction issue until it is released.
+ void reserveResource(uint64_t ResourceID);
+
+ // Release a previously reserved processor resource.
+ void releaseResource(uint64_t ResourceID);
+
+ bool canBeIssued(const InstrDesc &Desc) const;
+
+ void issueInstruction(
+ const InstrDesc &Desc,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
+
+ void cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed);
+
+#ifndef NDEBUG
+ void dump() const {
+ for (const std::unique_ptr<ResourceState> &Resource : Resources)
+ Resource->dump();
+ }
+#endif
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RESOURCE_MANAGER_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/RetireControlUnit.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/RetireControlUnit.h
new file mode 100644
index 000000000000..71360e984ade
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/RetireControlUnit.h
@@ -0,0 +1,104 @@
+//===---------------------- RetireControlUnit.h -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file simulates the hardware responsible for retiring instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RETIRE_CONTROL_UNIT_H
+#define LLVM_MCA_RETIRE_CONTROL_UNIT_H
+
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include <vector>
+
+namespace llvm {
+namespace mca {
+
+/// This class tracks which instructions are in-flight (i.e., dispatched but not
+/// retired) in the OoO backend.
+///
+/// This class checks on every cycle if/which instructions can be retired.
+/// Instructions are retired in program order.
+/// In the event of an instruction being retired, the pipeline that owns
+/// this RetireControlUnit (RCU) gets notified.
+///
+/// On instruction retired, register updates are all architecturally
+/// committed, and any physical registers previously allocated for the
+/// retired instruction are freed.
+struct RetireControlUnit : public HardwareUnit {
+ // A RUToken is created by the RCU for every instruction dispatched to the
+ // schedulers. These "tokens" are managed by the RCU in its token Queue.
+ //
+ // On every cycle ('cycleEvent'), the RCU iterates through the token queue
+ // looking for any token with its 'Executed' flag set. If a token has that
+ // flag set, then the instruction has reached the write-back stage and will
+ // be retired by the RCU.
+ //
+ // 'NumSlots' represents the number of entries consumed by the instruction in
+ // the reorder buffer. Those entries will become available again once the
+ // instruction is retired.
+ //
+ // Note that the size of the reorder buffer is defined by the scheduling
+ // model via field 'NumMicroOpBufferSize'.
+ struct RUToken {
+ InstRef IR;
+ unsigned NumSlots; // Slots reserved to this instruction.
+ bool Executed; // True if the instruction is past the WB stage.
+ };
+
+private:
+ unsigned NextAvailableSlotIdx;
+ unsigned CurrentInstructionSlotIdx;
+ unsigned AvailableSlots;
+ unsigned MaxRetirePerCycle; // 0 means no limit.
+ std::vector<RUToken> Queue;
+
+public:
+ RetireControlUnit(const MCSchedModel &SM);
+
+ bool isEmpty() const { return AvailableSlots == Queue.size(); }
+ bool isAvailable(unsigned Quantity = 1) const {
+ // Some instructions may declare a number of uOps which exceeds the size
+ // of the reorder buffer. To avoid problems, cap the amount of slots to
+ // the size of the reorder buffer.
+ Quantity = std::min(Quantity, static_cast<unsigned>(Queue.size()));
+
+ // Further normalize the number of micro opcodes for instructions that
+ // declare zero opcodes. This should match the behavior of method
+ // reserveSlot().
+ Quantity = std::max(Quantity, 1U);
+ return AvailableSlots >= Quantity;
+ }
+
+ unsigned getMaxRetirePerCycle() const { return MaxRetirePerCycle; }
+
+ // Reserves a number of slots, and returns a new token.
+ unsigned reserveSlot(const InstRef &IS, unsigned NumMicroOps);
+
+ // Return the current token from the RCU's circular token queue.
+ const RUToken &peekCurrentToken() const;
+
+ // Advance the pointer to the next token in the circular token queue.
+ void consumeCurrentToken();
+
+ // Update the RCU token to represent the executed state.
+ void onInstructionExecuted(unsigned TokenID);
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RETIRE_CONTROL_UNIT_H
diff --git a/contrib/llvm/include/llvm/MCA/HardwareUnits/Scheduler.h b/contrib/llvm/include/llvm/MCA/HardwareUnits/Scheduler.h
new file mode 100644
index 000000000000..351ea4827df9
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/HardwareUnits/Scheduler.h
@@ -0,0 +1,214 @@
+//===--------------------- Scheduler.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A scheduler for Processor Resource Units and Processor Resource Groups.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SCHEDULER_H
+#define LLVM_MCA_SCHEDULER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+#include "llvm/MCA/HardwareUnits/LSUnit.h"
+#include "llvm/MCA/HardwareUnits/ResourceManager.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+class SchedulerStrategy {
+public:
+ SchedulerStrategy() = default;
+ virtual ~SchedulerStrategy();
+
+ /// Returns true if Lhs should take priority over Rhs.
+ ///
+ /// This method is used by class Scheduler to select the "best" ready
+ /// instruction to issue to the underlying pipelines.
+ virtual bool compare(const InstRef &Lhs, const InstRef &Rhs) const = 0;
+};
+
+/// Default instruction selection strategy used by class Scheduler.
+class DefaultSchedulerStrategy : public SchedulerStrategy {
+ /// This method ranks instructions based on their age, and the number of known
+ /// users. The lower the rank value, the better.
+ int computeRank(const InstRef &Lhs) const {
+ return Lhs.getSourceIndex() - Lhs.getInstruction()->getNumUsers();
+ }
+
+public:
+ DefaultSchedulerStrategy() = default;
+ virtual ~DefaultSchedulerStrategy();
+
+ bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
+ int LhsRank = computeRank(Lhs);
+ int RhsRank = computeRank(Rhs);
+
+ /// Prioritize older instructions over younger instructions to minimize the
+ /// pressure on the reorder buffer.
+ if (LhsRank == RhsRank)
+ return Lhs.getSourceIndex() < Rhs.getSourceIndex();
+ return LhsRank < RhsRank;
+ }
+};
+
+/// Class Scheduler is responsible for issuing instructions to pipeline
+/// resources.
+///
+/// Internally, it delegates to a ResourceManager the management of processor
+/// resources. This class is also responsible for tracking the progress of
+/// instructions from the dispatch stage, until the write-back stage.
+///
+/// An instruction dispatched to the Scheduler is initially placed into either
+/// the 'WaitSet' or the 'ReadySet' depending on the availability of the input
+/// operands.
+///
+/// An instruction is moved from the WaitSet to the ReadySet when register
+/// operands become available, and all memory dependencies are met.
+/// Instructions that are moved from the WaitSet to the ReadySet transition
+/// in state from 'IS_AVAILABLE' to 'IS_READY'.
+///
+/// On every cycle, the Scheduler checks if it can promote instructions from the
+/// WaitSet to the ReadySet.
+///
+/// An Instruction is moved from the ReadySet to the `IssuedSet` when it is
+/// to a (one or more) pipeline(s). This event also causes an instruction state
+/// transition (i.e. from state IS_READY, to state IS_EXECUTING). An Instruction
+/// leaves the IssuedSet when it reaches the write-back stage.
+class Scheduler : public HardwareUnit {
+ LSUnit &LSU;
+
+ // Instruction selection strategy for this Scheduler.
+ std::unique_ptr<SchedulerStrategy> Strategy;
+
+ // Hardware resources that are managed by this scheduler.
+ std::unique_ptr<ResourceManager> Resources;
+
+ std::vector<InstRef> WaitSet;
+ std::vector<InstRef> ReadySet;
+ std::vector<InstRef> IssuedSet;
+
+ /// Verify the given selection strategy and set the Strategy member
+ /// accordingly. If no strategy is provided, the DefaultSchedulerStrategy is
+ /// used.
+ void initializeStrategy(std::unique_ptr<SchedulerStrategy> S);
+
+ /// Issue an instruction without updating the ready queue.
+ void issueInstructionImpl(
+ InstRef &IR,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);
+
+ // Identify instructions that have finished executing, and remove them from
+ // the IssuedSet. References to executed instructions are added to input
+ // vector 'Executed'.
+ void updateIssuedSet(SmallVectorImpl<InstRef> &Executed);
+
+ // Try to promote instructions from WaitSet to ReadySet.
+ // Add promoted instructions to the 'Ready' vector in input.
+ void promoteToReadySet(SmallVectorImpl<InstRef> &Ready);
+
+public:
+ Scheduler(const MCSchedModel &Model, LSUnit &Lsu)
+ : Scheduler(Model, Lsu, nullptr) {}
+
+ Scheduler(const MCSchedModel &Model, LSUnit &Lsu,
+ std::unique_ptr<SchedulerStrategy> SelectStrategy)
+ : Scheduler(make_unique<ResourceManager>(Model), Lsu,
+ std::move(SelectStrategy)) {}
+
+ Scheduler(std::unique_ptr<ResourceManager> RM, LSUnit &Lsu,
+ std::unique_ptr<SchedulerStrategy> SelectStrategy)
+ : LSU(Lsu), Resources(std::move(RM)) {
+ initializeStrategy(std::move(SelectStrategy));
+ }
+
+ // Stalls generated by the scheduler.
+ enum Status {
+ SC_AVAILABLE,
+ SC_LOAD_QUEUE_FULL,
+ SC_STORE_QUEUE_FULL,
+ SC_BUFFERS_FULL,
+ SC_DISPATCH_GROUP_STALL,
+ };
+
+ /// Check if the instruction in 'IR' can be dispatched and returns an answer
+ /// in the form of a Status value.
+ ///
+ /// The DispatchStage is responsible for querying the Scheduler before
+ /// dispatching new instructions. This routine is used for performing such
+ /// a query. If the instruction 'IR' can be dispatched, then true is
+ /// returned, otherwise false is returned with Event set to the stall type.
+ /// Internally, it also checks if the load/store unit is available.
+ Status isAvailable(const InstRef &IR) const;
+
+ /// Reserves buffer and LSUnit queue resources that are necessary to issue
+ /// this instruction.
+ ///
+  /// Instruction IR is then ready to be issued to the underlying
+  /// pipelines. Note that this operation cannot fail; it assumes that a
+  /// previous call to method `isAvailable(IR)` returned `SC_AVAILABLE`.
+ void dispatch(const InstRef &IR);
+
+ /// Returns true if IR is ready to be executed by the underlying pipelines.
+ /// This method assumes that IR has been previously dispatched.
+ bool isReady(const InstRef &IR) const;
+
+ /// Issue an instruction and populates a vector of used pipeline resources,
+ /// and a vector of instructions that transitioned to the ready state as a
+ /// result of this event.
+ void issueInstruction(
+ InstRef &IR,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Used,
+ SmallVectorImpl<InstRef> &Ready);
+
+ /// Returns true if IR has to be issued immediately, or if IR is a zero
+ /// latency instruction.
+ bool mustIssueImmediately(const InstRef &IR) const;
+
+ /// This routine notifies the Scheduler that a new cycle just started.
+ ///
+ /// It notifies the underlying ResourceManager that a new cycle just started.
+ /// Vector `Freed` is populated with resourceRef related to resources that
+ /// have changed in state, and that are now available to new instructions.
+ /// Instructions executed are added to vector Executed, while vector Ready is
+ /// populated with instructions that have become ready in this new cycle.
+ void cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
+ SmallVectorImpl<InstRef> &Ready,
+ SmallVectorImpl<InstRef> &Executed);
+
+ /// Convert a resource mask into a valid llvm processor resource identifier.
+ unsigned getResourceID(uint64_t Mask) const {
+ return Resources->resolveResourceMask(Mask);
+ }
+
+ /// Select the next instruction to issue from the ReadySet. Returns an invalid
+ /// instruction reference if there are no ready instructions, or if processor
+ /// resources are not available.
+ InstRef select();
+
+#ifndef NDEBUG
+ // Update the ready queues.
+ void dump() const;
+
+ // This routine performs a sanity check. This routine should only be called
+ // when we know that 'IR' is not in the scheduler's instruction queues.
+ void sanityCheck(const InstRef &IR) const {
+ assert(find(WaitSet, IR) == WaitSet.end() && "Already in the wait set!");
+ assert(find(ReadySet, IR) == ReadySet.end() && "Already in the ready set!");
+ assert(find(IssuedSet, IR) == IssuedSet.end() && "Already executing!");
+ }
+#endif // !NDEBUG
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SCHEDULER_H
diff --git a/contrib/llvm/include/llvm/MCA/InstrBuilder.h b/contrib/llvm/include/llvm/MCA/InstrBuilder.h
new file mode 100644
index 000000000000..5f998db5e4ce
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/InstrBuilder.h
@@ -0,0 +1,77 @@
+//===--------------------- InstrBuilder.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A builder class for instructions that are statically analyzed by llvm-mca.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRBUILDER_H
+#define LLVM_MCA_INSTRBUILDER_H
+
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Support.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+/// A builder class that knows how to construct Instruction objects.
+///
+/// Every llvm-mca Instruction is described by an object of class InstrDesc.
+/// An InstrDesc describes which registers are read/written by the instruction,
+/// as well as the instruction latency and hardware resources consumed.
+///
+/// This class is used by the tool to construct Instructions and instruction
+/// descriptors (i.e. InstrDesc objects).
+/// Information from the machine scheduling model is used to identify processor
+/// resources that are consumed by an instruction.
+class InstrBuilder {
+ const MCSubtargetInfo &STI;
+ const MCInstrInfo &MCII;
+ const MCRegisterInfo &MRI;
+ const MCInstrAnalysis *MCIA;
+ SmallVector<uint64_t, 8> ProcResourceMasks;
+
+ DenseMap<unsigned short, std::unique_ptr<const InstrDesc>> Descriptors;
+ DenseMap<const MCInst *, std::unique_ptr<const InstrDesc>> VariantDescriptors;
+
+ bool FirstCallInst;
+ bool FirstReturnInst;
+
+ Expected<const InstrDesc &> createInstrDescImpl(const MCInst &MCI);
+ Expected<const InstrDesc &> getOrCreateInstrDesc(const MCInst &MCI);
+
+ InstrBuilder(const InstrBuilder &) = delete;
+ InstrBuilder &operator=(const InstrBuilder &) = delete;
+
+ void populateWrites(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
+ void populateReads(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
+ Error verifyInstrDesc(const InstrDesc &ID, const MCInst &MCI) const;
+
+public:
+ InstrBuilder(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
+ const MCRegisterInfo &RI, const MCInstrAnalysis *IA);
+
+ void clear() {
+ VariantDescriptors.shrink_and_clear();
+ FirstCallInst = true;
+ FirstReturnInst = true;
+ }
+
+ Expected<std::unique_ptr<Instruction>> createInstruction(const MCInst &MCI);
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRBUILDER_H
diff --git a/contrib/llvm/include/llvm/MCA/Instruction.h b/contrib/llvm/include/llvm/MCA/Instruction.h
new file mode 100644
index 000000000000..b91610c64d85
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Instruction.h
@@ -0,0 +1,551 @@
+//===--------------------- Instruction.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines abstractions used by the Pipeline to model register reads,
+/// register writes and instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRUCTION_H
+#define LLVM_MCA_INSTRUCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MathExtras.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/raw_ostream.h"
+#endif
+
+#include <memory>
+
+namespace llvm {
+
+namespace mca {
+
+constexpr int UNKNOWN_CYCLES = -512;
+
+/// A register write descriptor.
+struct WriteDescriptor {
+ // Operand index. The index is negative for implicit writes only.
+ // For implicit writes, the actual operand index is computed performing
+ // a bitwise not of the OpIndex.
+ int OpIndex;
+ // Write latency. Number of cycles before write-back stage.
+ unsigned Latency;
+ // This field is set to a value different than zero only if this
+ // is an implicit definition.
+ unsigned RegisterID;
+ // Instruction itineraries would set this field to the SchedClass ID.
+ // Otherwise, it defaults to the WriteResourceID from the MCWriteLatencyEntry
+ // element associated to this write.
+ // When computing read latencies, this value is matched against the
+ // "ReadAdvance" information. The hardware backend may implement
+ // dedicated forwarding paths to quickly propagate write results to dependent
+ // instructions waiting in the reservation station (effectively bypassing the
+ // write-back stage).
+ unsigned SClassOrWriteResourceID;
+ // True only if this is a write obtained from an optional definition.
+ // Optional definitions are allowed to reference regID zero (i.e. "no
+ // register").
+ bool IsOptionalDef;
+
+ bool isImplicitWrite() const { return OpIndex < 0; };
+};
+
+/// A register read descriptor.
+struct ReadDescriptor {
+ // A MCOperand index. This is used by the Dispatch logic to identify register
+ // reads. Implicit reads have negative indices. The actual operand index of an
+ // implicit read is the bitwise not of field OpIndex.
+ int OpIndex;
+ // The actual "UseIdx". This is used to query the ReadAdvance table. Explicit
+ // uses always come first in the sequence of uses.
+ unsigned UseIndex;
+ // This field is only set if this is an implicit read.
+ unsigned RegisterID;
+ // Scheduling Class Index. It is used to query the scheduling model for the
+ // MCSchedClassDesc object.
+ unsigned SchedClassID;
+
+ bool isImplicitRead() const { return OpIndex < 0; };
+};
+
+class ReadState;
+
+/// Tracks uses of a register definition (e.g. register write).
+///
+/// Each implicit/explicit register write is associated with an instance of
+/// this class. A WriteState object tracks the dependent users of a
+/// register write. It also tracks how many cycles are left before the write
+/// back stage.
+class WriteState {
+ const WriteDescriptor *WD;
+ // On instruction issue, this field is set equal to the write latency.
+ // Before instruction issue, this field defaults to -512, a special
+ // value that represents an "unknown" number of cycles.
+ int CyclesLeft;
+
+ // Actual register defined by this write. This field is only used
+ // to speedup queries on the register file.
+ // For implicit writes, this field always matches the value of
+ // field RegisterID from WD.
+ unsigned RegisterID;
+
+ // Physical register file that serves register RegisterID.
+ unsigned PRFID;
+
+ // True if this write implicitly clears the upper portion of RegisterID's
+ // super-registers.
+ bool ClearsSuperRegs;
+
+ // True if this write is from a dependency breaking zero-idiom instruction.
+ bool WritesZero;
+
+ // True if this write has been eliminated at register renaming stage.
+  // Example: a register move doesn't consume scheduler/pipeline resources if
+ // it is eliminated at register renaming stage. It still consumes
+ // decode bandwidth, and ROB entries.
+ bool IsEliminated;
+
+ // This field is set if this is a partial register write, and it has a false
+ // dependency on any previous write of the same register (or a portion of it).
+ // DependentWrite must be able to complete before this write completes, so
+ // that we don't break the WAW, and the two writes can be merged together.
+ const WriteState *DependentWrite;
+
+ // A partial write that is in a false dependency with this write.
+ WriteState *PartialWrite;
+
+ unsigned DependentWriteCyclesLeft;
+
+ // A list of dependent reads. Users is a set of dependent
+ // reads. A dependent read is added to the set only if CyclesLeft
+ // is "unknown". As soon as CyclesLeft is 'known', each user in the set
+ // gets notified with the actual CyclesLeft.
+
+ // The 'second' element of a pair is a "ReadAdvance" number of cycles.
+ SmallVector<std::pair<ReadState *, int>, 4> Users;
+
+public:
+ WriteState(const WriteDescriptor &Desc, unsigned RegID,
+ bool clearsSuperRegs = false, bool writesZero = false)
+ : WD(&Desc), CyclesLeft(UNKNOWN_CYCLES), RegisterID(RegID), PRFID(0),
+ ClearsSuperRegs(clearsSuperRegs), WritesZero(writesZero),
+ IsEliminated(false), DependentWrite(nullptr), PartialWrite(nullptr),
+ DependentWriteCyclesLeft(0) {}
+
+ WriteState(const WriteState &Other) = default;
+ WriteState &operator=(const WriteState &Other) = default;
+
+ int getCyclesLeft() const { return CyclesLeft; }
+ unsigned getWriteResourceID() const { return WD->SClassOrWriteResourceID; }
+ unsigned getRegisterID() const { return RegisterID; }
+ unsigned getRegisterFileID() const { return PRFID; }
+ unsigned getLatency() const { return WD->Latency; }
+
+ void addUser(ReadState *Use, int ReadAdvance);
+ void addUser(WriteState *Use);
+
+ unsigned getDependentWriteCyclesLeft() const {
+ return DependentWriteCyclesLeft;
+ }
+
+ unsigned getNumUsers() const {
+ unsigned NumUsers = Users.size();
+ if (PartialWrite)
+ ++NumUsers;
+ return NumUsers;
+ }
+
+ bool clearsSuperRegisters() const { return ClearsSuperRegs; }
+ bool isWriteZero() const { return WritesZero; }
+ bool isEliminated() const { return IsEliminated; }
+ bool isExecuted() const {
+ return CyclesLeft != UNKNOWN_CYCLES && CyclesLeft <= 0;
+ }
+
+ const WriteState *getDependentWrite() const { return DependentWrite; }
+ void setDependentWrite(WriteState *Other) { DependentWrite = Other; }
+ void writeStartEvent(unsigned Cycles) {
+ DependentWriteCyclesLeft = Cycles;
+ DependentWrite = nullptr;
+ }
+
+ void setWriteZero() { WritesZero = true; }
+ void setEliminated() {
+ assert(Users.empty() && "Write is in an inconsistent state.");
+ CyclesLeft = 0;
+ IsEliminated = true;
+ }
+
+ void setPRF(unsigned PRF) { PRFID = PRF; }
+
+ // On every cycle, update CyclesLeft and notify dependent users.
+ void cycleEvent();
+ void onInstructionIssued();
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+/// Tracks register operand latency in cycles.
+///
+/// A read may be dependent on more than one write. This occurs when some
+/// writes only partially update the register associated to this read.
+class ReadState {
+ const ReadDescriptor *RD;
+  // Physical register identifier associated with this read.
+ unsigned RegisterID;
+ // Physical register file that serves register RegisterID.
+ unsigned PRFID;
+ // Number of writes that contribute to the definition of RegisterID.
+ // In the absence of partial register updates, the number of DependentWrites
+ // cannot be more than one.
+ unsigned DependentWrites;
+ // Number of cycles left before RegisterID can be read. This value depends on
+ // the latency of all the dependent writes. It defaults to UNKNOWN_CYCLES.
+ // It gets set to the value of field TotalCycles only when the 'CyclesLeft' of
+ // every dependent write is known.
+ int CyclesLeft;
+ // This field is updated on every writeStartEvent(). When the number of
+ // dependent writes (i.e. field DependentWrite) is zero, this value is
+ // propagated to field CyclesLeft.
+ unsigned TotalCycles;
+ // This field is set to true only if there are no dependent writes, and
+ // there are no `CyclesLeft' to wait.
+ bool IsReady;
+ // True if this is a read from a known zero register.
+ bool IsZero;
+ // True if this register read is from a dependency-breaking instruction.
+ bool IndependentFromDef;
+
+public:
+ ReadState(const ReadDescriptor &Desc, unsigned RegID)
+ : RD(&Desc), RegisterID(RegID), PRFID(0), DependentWrites(0),
+ CyclesLeft(UNKNOWN_CYCLES), TotalCycles(0), IsReady(true),
+ IsZero(false), IndependentFromDef(false) {}
+
+ const ReadDescriptor &getDescriptor() const { return *RD; }
+ unsigned getSchedClass() const { return RD->SchedClassID; }
+ unsigned getRegisterID() const { return RegisterID; }
+ unsigned getRegisterFileID() const { return PRFID; }
+
+ bool isReady() const { return IsReady; }
+ bool isImplicitRead() const { return RD->isImplicitRead(); }
+
+ bool isIndependentFromDef() const { return IndependentFromDef; }
+ void setIndependentFromDef() { IndependentFromDef = true; }
+
+ void cycleEvent();
+ void writeStartEvent(unsigned Cycles);
+ void setDependentWrites(unsigned Writes) {
+ DependentWrites = Writes;
+ IsReady = !Writes;
+ }
+
+ bool isReadZero() const { return IsZero; }
+ void setReadZero() { IsZero = true; }
+ void setPRF(unsigned ID) { PRFID = ID; }
+};
+
+/// A sequence of cycles.
+///
+/// This class can be used as a building block to construct ranges of cycles.
+class CycleSegment {
+ unsigned Begin; // Inclusive.
+ unsigned End; // Exclusive.
+ bool Reserved; // Resources associated to this segment must be reserved.
+
+public:
+ CycleSegment(unsigned StartCycle, unsigned EndCycle, bool IsReserved = false)
+ : Begin(StartCycle), End(EndCycle), Reserved(IsReserved) {}
+
+ bool contains(unsigned Cycle) const { return Cycle >= Begin && Cycle < End; }
+ bool startsAfter(const CycleSegment &CS) const { return End <= CS.Begin; }
+ bool endsBefore(const CycleSegment &CS) const { return Begin >= CS.End; }
+ bool overlaps(const CycleSegment &CS) const {
+ return !startsAfter(CS) && !endsBefore(CS);
+ }
+ bool isExecuting() const { return Begin == 0 && End != 0; }
+ bool isExecuted() const { return End == 0; }
+ bool operator<(const CycleSegment &Other) const {
+ return Begin < Other.Begin;
+ }
+ CycleSegment &operator--(void) {
+ if (Begin)
+ Begin--;
+ if (End)
+ End--;
+ return *this;
+ }
+
+ bool isValid() const { return Begin <= End; }
+ unsigned size() const { return End - Begin; };
+ void subtract(unsigned Cycles) {
+ assert(End >= Cycles);
+ End -= Cycles;
+ }
+
+ unsigned begin() const { return Begin; }
+ unsigned end() const { return End; }
+ void setEnd(unsigned NewEnd) { End = NewEnd; }
+ bool isReserved() const { return Reserved; }
+ void setReserved() { Reserved = true; }
+};
+
+/// Helper used by class InstrDesc to describe how hardware resources
+/// are used.
+///
+/// This class describes how many resource units of a specific resource kind
+/// (and how many cycles) are "used" by an instruction.
+struct ResourceUsage {
+ CycleSegment CS;
+ unsigned NumUnits;
+ ResourceUsage(CycleSegment Cycles, unsigned Units = 1)
+ : CS(Cycles), NumUnits(Units) {}
+ unsigned size() const { return CS.size(); }
+ bool isReserved() const { return CS.isReserved(); }
+ void setReserved() { CS.setReserved(); }
+};
+
+/// An instruction descriptor
+struct InstrDesc {
+ SmallVector<WriteDescriptor, 4> Writes; // Implicit writes are at the end.
+ SmallVector<ReadDescriptor, 4> Reads; // Implicit reads are at the end.
+
+ // For every resource used by an instruction of this kind, this vector
+ // reports the number of "consumed cycles".
+ SmallVector<std::pair<uint64_t, ResourceUsage>, 4> Resources;
+
+ // A list of buffered resources consumed by this instruction.
+ SmallVector<uint64_t, 4> Buffers;
+
+ unsigned MaxLatency;
+ // Number of MicroOps for this instruction.
+ unsigned NumMicroOps;
+
+ bool MayLoad;
+ bool MayStore;
+ bool HasSideEffects;
+ bool BeginGroup;
+ bool EndGroup;
+
+ // True if all buffered resources are in-order, and there is at least one
+ // buffer which is a dispatch hazard (BufferSize = 0).
+ bool MustIssueImmediately;
+
+ // A zero latency instruction doesn't consume any scheduler resources.
+ bool isZeroLatency() const { return !MaxLatency && Resources.empty(); }
+
+ InstrDesc() = default;
+ InstrDesc(const InstrDesc &Other) = delete;
+ InstrDesc &operator=(const InstrDesc &Other) = delete;
+};
+
+/// Base class for instructions consumed by the simulation pipeline.
+///
+/// This class tracks data dependencies as well as generic properties
+/// of the instruction.
+class InstructionBase {
+ const InstrDesc &Desc;
+
+ // This field is set for instructions that are candidates for move
+ // elimination. For more information about move elimination, see the
+ // definition of RegisterMappingTracker in RegisterFile.h
+ bool IsOptimizableMove;
+
+ // Output dependencies.
+ // One entry per each implicit and explicit register definition.
+ SmallVector<WriteState, 4> Defs;
+
+ // Input dependencies.
+ // One entry per each implicit and explicit register use.
+ SmallVector<ReadState, 4> Uses;
+
+public:
+ InstructionBase(const InstrDesc &D) : Desc(D), IsOptimizableMove(false) {}
+
+ SmallVectorImpl<WriteState> &getDefs() { return Defs; }
+ const ArrayRef<WriteState> getDefs() const { return Defs; }
+ SmallVectorImpl<ReadState> &getUses() { return Uses; }
+ const ArrayRef<ReadState> getUses() const { return Uses; }
+ const InstrDesc &getDesc() const { return Desc; }
+
+ unsigned getLatency() const { return Desc.MaxLatency; }
+
+ bool hasDependentUsers() const {
+ return any_of(Defs,
+ [](const WriteState &Def) { return Def.getNumUsers() > 0; });
+ }
+
+ unsigned getNumUsers() const {
+ unsigned NumUsers = 0;
+ for (const WriteState &Def : Defs)
+ NumUsers += Def.getNumUsers();
+ return NumUsers;
+ }
+
+ // Returns true if this instruction is a candidate for move elimination.
+ bool isOptimizableMove() const { return IsOptimizableMove; }
+ void setOptimizableMove() { IsOptimizableMove = true; }
+};
+
+/// An instruction propagated through the simulated instruction pipeline.
+///
+/// This class is used to monitor changes to the internal state of instructions
+/// that are sent to the various components of the simulated hardware pipeline.
+class Instruction : public InstructionBase {
+ enum InstrStage {
+ IS_INVALID, // Instruction in an invalid state.
+ IS_AVAILABLE, // Instruction dispatched but operands are not ready.
+ IS_READY, // Instruction dispatched and operands ready.
+ IS_EXECUTING, // Instruction issued.
+ IS_EXECUTED, // Instruction executed. Values are written back.
+ IS_RETIRED // Instruction retired.
+ };
+
+ // The current instruction stage.
+ enum InstrStage Stage;
+
+ // This value defaults to the instruction latency. This instruction is
+ // considered executed when field CyclesLeft goes to zero.
+ int CyclesLeft;
+
+ // Retire Unit token ID for this instruction.
+ unsigned RCUTokenID;
+
+public:
+ Instruction(const InstrDesc &D)
+ : InstructionBase(D), Stage(IS_INVALID), CyclesLeft(UNKNOWN_CYCLES),
+ RCUTokenID(0) {}
+
+ unsigned getRCUTokenID() const { return RCUTokenID; }
+ int getCyclesLeft() const { return CyclesLeft; }
+
+ // Transition to the dispatch stage, and assign a RCUToken to this
+ // instruction. The RCUToken is used to track the completion of every
+ // register write performed by this instruction.
+ void dispatch(unsigned RCUTokenID);
+
+ // Instruction issued. Transition to the IS_EXECUTING state, and update
+ // all the definitions.
+ void execute();
+
+ // Force a transition from the IS_AVAILABLE state to the IS_READY state if
+ // input operands are all ready. State transitions normally occur at the
+ // beginning of a new cycle (see method cycleEvent()). However, the scheduler
+ // may decide to promote instructions from the wait queue to the ready queue
+ // as the result of another issue event. This method is called every time the
+ // instruction might have changed in state.
+ void update();
+
+ bool isDispatched() const { return Stage == IS_AVAILABLE; }
+ bool isReady() const { return Stage == IS_READY; }
+ bool isExecuting() const { return Stage == IS_EXECUTING; }
+ bool isExecuted() const { return Stage == IS_EXECUTED; }
+ bool isRetired() const { return Stage == IS_RETIRED; }
+
+ bool isEliminated() const {
+ return isReady() && getDefs().size() &&
+ all_of(getDefs(),
+ [](const WriteState &W) { return W.isEliminated(); });
+ }
+
+ // Forces a transition from state IS_AVAILABLE to state IS_EXECUTED.
+ void forceExecuted();
+
+ void retire() {
+ assert(isExecuted() && "Instruction is in an invalid state!");
+ Stage = IS_RETIRED;
+ }
+
+ void cycleEvent();
+};
+
+/// An InstRef contains both a SourceMgr index and Instruction pair. The index
+/// is used as a unique identifier for the instruction. MCA will make use of
+/// this index as a key throughout MCA.
+class InstRef {
+ std::pair<unsigned, Instruction *> Data;
+
+public:
+ InstRef() : Data(std::make_pair(0, nullptr)) {}
+ InstRef(unsigned Index, Instruction *I) : Data(std::make_pair(Index, I)) {}
+
+ bool operator==(const InstRef &Other) const { return Data == Other.Data; }
+
+ unsigned getSourceIndex() const { return Data.first; }
+ Instruction *getInstruction() { return Data.second; }
+ const Instruction *getInstruction() const { return Data.second; }
+
+ /// Returns true if this references a valid instruction.
+ operator bool() const { return Data.second != nullptr; }
+
+ /// Invalidate this reference.
+ void invalidate() { Data.second = nullptr; }
+
+#ifndef NDEBUG
+ void print(raw_ostream &OS) const { OS << getSourceIndex(); }
+#endif
+};
+
+#ifndef NDEBUG
+inline raw_ostream &operator<<(raw_ostream &OS, const InstRef &IR) {
+ IR.print(OS);
+ return OS;
+}
+#endif
+
+/// A reference to a register write.
+///
+/// This class is mainly used by the register file to describe register
+/// mappings. It correlates a register write to the source index of the
+/// defining instruction.
+class WriteRef {
+ std::pair<unsigned, WriteState *> Data;
+ static const unsigned INVALID_IID;
+
+public:
+ WriteRef() : Data(INVALID_IID, nullptr) {}
+ WriteRef(unsigned SourceIndex, WriteState *WS) : Data(SourceIndex, WS) {}
+
+ unsigned getSourceIndex() const { return Data.first; }
+ const WriteState *getWriteState() const { return Data.second; }
+ WriteState *getWriteState() { return Data.second; }
+ void invalidate() { Data.second = nullptr; }
+ bool isWriteZero() const {
+ assert(isValid() && "Invalid null WriteState found!");
+ return getWriteState()->isWriteZero();
+ }
+
+ /// Returns true if this register write has been executed, and the new
+ /// register value is therefore available to users.
+ bool isAvailable() const {
+ if (getSourceIndex() == INVALID_IID)
+ return false;
+ const WriteState *WS = getWriteState();
+ return !WS || WS->isExecuted();
+ }
+
+ bool isValid() const { return Data.first != INVALID_IID && Data.second; }
+ bool operator==(const WriteRef &Other) const { return Data == Other.Data; }
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRUCTION_H
diff --git a/contrib/llvm/include/llvm/MCA/Pipeline.h b/contrib/llvm/include/llvm/MCA/Pipeline.h
new file mode 100644
index 000000000000..acd256060bdd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Pipeline.h
@@ -0,0 +1,79 @@
+//===--------------------- Pipeline.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements an ordered container of stages that simulate the
+/// pipeline of a hardware backend.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_PIPELINE_H
+#define LLVM_MCA_PIPELINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/Stage.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+class HWEventListener;
+
+/// A pipeline for a specific subtarget.
+///
+/// It emulates an out-of-order execution of instructions. Instructions are
+/// fetched from a MCInst sequence managed by an initial 'Fetch' stage.
+/// Instructions are firstly fetched, then dispatched to the schedulers, and
+/// then executed.
+///
+/// This class tracks the lifetime of an instruction from the moment where
+/// it gets dispatched to the schedulers, to the moment where it finishes
+/// executing and register writes are architecturally committed.
+/// In particular, it monitors changes in the state of every instruction
+/// in flight.
+///
+/// Instructions are executed in a loop of iterations. The number of iterations
+/// is defined by the SourceMgr object, which is managed by the initial stage
+/// of the instruction pipeline.
+///
+/// The Pipeline entry point is method 'run()' which executes cycles in a loop
+/// while there are new instructions to dispatch, and not every instruction
+/// has been retired.
+///
+/// Internally, the Pipeline collects statistical information in the form of
+/// histograms. For example, it tracks how the dispatch group size changes
+/// over time.
+class Pipeline {
+ Pipeline(const Pipeline &P) = delete;
+ Pipeline &operator=(const Pipeline &P) = delete;
+
+ /// An ordered list of stages that define this instruction pipeline.
+ SmallVector<std::unique_ptr<Stage>, 8> Stages;
+ std::set<HWEventListener *> Listeners;
+ unsigned Cycles;
+
+ Error runCycle();
+ bool hasWorkToProcess();
+ void notifyCycleBegin();
+ void notifyCycleEnd();
+
+public:
+ Pipeline() : Cycles(0) {}
+ void appendStage(std::unique_ptr<Stage> S);
+
+ /// Returns the total number of simulated cycles.
+ Expected<unsigned> run();
+
+ void addEventListener(HWEventListener *Listener);
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_PIPELINE_H
diff --git a/contrib/llvm/include/llvm/MCA/SourceMgr.h b/contrib/llvm/include/llvm/MCA/SourceMgr.h
new file mode 100644
index 000000000000..5e0ca6419f5d
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/SourceMgr.h
@@ -0,0 +1,57 @@
+//===--------------------- SourceMgr.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements class SourceMgr. Class SourceMgr abstracts the input
+/// code sequence (a sequence of MCInst), and assings unique identifiers to
+/// every instruction in the sequence.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SOURCEMGR_H
+#define LLVM_MCA_SOURCEMGR_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+namespace mca {
+
+class Instruction;
+
+typedef std::pair<unsigned, const Instruction &> SourceRef;
+
+class SourceMgr {
+ using UniqueInst = std::unique_ptr<Instruction>;
+ ArrayRef<UniqueInst> Sequence;
+ unsigned Current;
+ const unsigned Iterations;
+ static const unsigned DefaultIterations = 100;
+
+public:
+ SourceMgr(ArrayRef<UniqueInst> S, unsigned Iter)
+ : Sequence(S), Current(0), Iterations(Iter ? Iter : DefaultIterations) {}
+
+ unsigned getNumIterations() const { return Iterations; }
+ unsigned size() const { return Sequence.size(); }
+ bool hasNext() const { return Current < (Iterations * Sequence.size()); }
+ void updateNext() { ++Current; }
+
+ SourceRef peekNext() const {
+ assert(hasNext() && "Already at end of sequence!");
+ return SourceRef(Current, *Sequence[Current % Sequence.size()]);
+ }
+
+ using const_iterator = ArrayRef<UniqueInst>::const_iterator;
+ const_iterator begin() const { return Sequence.begin(); }
+ const_iterator end() const { return Sequence.end(); }
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SOURCEMGR_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/DispatchStage.h b/contrib/llvm/include/llvm/MCA/Stages/DispatchStage.h
new file mode 100644
index 000000000000..f015cd7522eb
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/DispatchStage.h
@@ -0,0 +1,93 @@
+//===----------------------- DispatchStage.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file models the dispatch component of an instruction pipeline.
+///
+/// The DispatchStage is responsible for updating instruction dependencies
+/// and communicating to the simulated instruction scheduler that an instruction
+/// is ready to be scheduled for execution.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_DISPATCH_STAGE_H
+#define LLVM_MCA_DISPATCH_STAGE_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+// Implements the hardware dispatch logic.
+//
+// This class is responsible for the dispatch stage, in which instructions are
+// dispatched in groups to the Scheduler. An instruction can be dispatched if
+// the following conditions are met:
+// 1) There are enough entries in the reorder buffer (see class
+// RetireControlUnit) to write the opcodes associated with the instruction.
+// 2) There are enough physical registers to rename output register operands.
+// 3) There are enough entries available in the used buffered resource(s).
+//
+// The number of micro opcodes that can be dispatched in one cycle is limited by
+// the value of field 'DispatchWidth'. A "dynamic dispatch stall" occurs when
+// processor resources are not available. Dispatch stall events are counted
+// during the entire execution of the code, and displayed by the performance
+// report when flag '-dispatch-stats' is specified.
+//
+// If the number of micro opcodes exceeds DispatchWidth, then the instruction
+// is dispatched in multiple cycles.
+class DispatchStage final : public Stage {
+ unsigned DispatchWidth;
+ unsigned AvailableEntries;
+ unsigned CarryOver;
+ InstRef CarriedOver;
+ const MCSubtargetInfo &STI;
+ RetireControlUnit &RCU;
+ RegisterFile &PRF;
+
+ bool checkRCU(const InstRef &IR) const;
+ bool checkPRF(const InstRef &IR) const;
+ bool canDispatch(const InstRef &IR) const;
+ Error dispatch(InstRef IR);
+
+ void updateRAWDependencies(ReadState &RS, const MCSubtargetInfo &STI);
+
+ void notifyInstructionDispatched(const InstRef &IR,
+ ArrayRef<unsigned> UsedPhysRegs,
+ unsigned uOps) const;
+
+public:
+ DispatchStage(const MCSubtargetInfo &Subtarget, const MCRegisterInfo &MRI,
+ unsigned MaxDispatchWidth, RetireControlUnit &R,
+ RegisterFile &F)
+ : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
+ CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {}
+
+ bool isAvailable(const InstRef &IR) const override;
+
+ // The dispatch logic internally doesn't buffer instructions. So there is
+ // never work to do at the beginning of every cycle.
+ bool hasWorkToComplete() const override { return false; }
+ Error cycleStart() override;
+ Error execute(InstRef &IR) override;
+
+#ifndef NDEBUG
+ void dump() const;
+#endif
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_DISPATCH_STAGE_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/EntryStage.h b/contrib/llvm/include/llvm/MCA/Stages/EntryStage.h
new file mode 100644
index 000000000000..cd9a65b8cc2b
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/EntryStage.h
@@ -0,0 +1,52 @@
+//===---------------------- EntryStage.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the Entry stage of an instruction pipeline. Its sole
+/// purpose in life is to pick instructions in sequence and move them to the
+/// next pipeline stage.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_ENTRY_STAGE_H
+#define LLVM_MCA_ENTRY_STAGE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/SourceMgr.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class EntryStage final : public Stage {
+ InstRef CurrentInstruction;
+ SmallVector<std::unique_ptr<Instruction>, 16> Instructions;
+ SourceMgr &SM;
+ unsigned NumRetired;
+
+ // Updates the program counter, and sets 'CurrentInstruction'.
+ void getNextInstruction();
+
+ EntryStage(const EntryStage &Other) = delete;
+ EntryStage &operator=(const EntryStage &Other) = delete;
+
+public:
+ EntryStage(SourceMgr &SM) : CurrentInstruction(), SM(SM), NumRetired(0) { }
+
+ bool isAvailable(const InstRef &IR) const override;
+ bool hasWorkToComplete() const override;
+ Error execute(InstRef &IR) override;
+ Error cycleStart() override;
+ Error cycleEnd() override;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_ENTRY_STAGE_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/ExecuteStage.h b/contrib/llvm/include/llvm/MCA/Stages/ExecuteStage.h
new file mode 100644
index 000000000000..8cb287e06d9f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/ExecuteStage.h
@@ -0,0 +1,80 @@
+//===---------------------- ExecuteStage.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the execution stage of a default instruction pipeline.
+///
+/// The ExecuteStage is responsible for managing the hardware scheduler
+/// and issuing notifications that an instruction has been executed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_EXECUTE_STAGE_H
+#define LLVM_MCA_EXECUTE_STAGE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class ExecuteStage final : public Stage {
+ Scheduler &HWS;
+
+ Error issueInstruction(InstRef &IR);
+
+ // Called at the beginning of each cycle to issue already dispatched
+ // instructions to the underlying pipelines.
+ Error issueReadyInstructions();
+
+ // Used to notify instructions eliminated at register renaming stage.
+ Error handleInstructionEliminated(InstRef &IR);
+
+ ExecuteStage(const ExecuteStage &Other) = delete;
+ ExecuteStage &operator=(const ExecuteStage &Other) = delete;
+
+public:
+ ExecuteStage(Scheduler &S) : Stage(), HWS(S) {}
+
+ // This stage works under the assumption that the Pipeline will eventually
+ // execute a retire stage. We don't need to check if pipelines and/or
+ // schedulers have instructions to process, because those instructions are
+ // also tracked by the retire control unit. That means,
+ // RetireControlUnit::hasWorkToComplete() is responsible for checking if there
+ // are still instructions in-flight in the out-of-order backend.
+ bool hasWorkToComplete() const override { return false; }
+ bool isAvailable(const InstRef &IR) const override;
+
+ // Notifies the scheduler that a new cycle just started.
+ //
+ // This method notifies the scheduler that a new cycle started.
+ // This method is also responsible for notifying listeners about instructions
+ // state changes, and processor resources freed by the scheduler.
+ // Instructions that transitioned to the 'Executed' state are automatically
+ // moved to the next stage (i.e. RetireStage).
+ Error cycleStart() override;
+ Error execute(InstRef &IR) override;
+
+ void notifyInstructionIssued(
+ const InstRef &IR,
+ MutableArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const;
+ void notifyInstructionExecuted(const InstRef &IR) const;
+ void notifyInstructionReady(const InstRef &IR) const;
+ void notifyResourceAvailable(const ResourceRef &RR) const;
+
+ // Notify listeners that buffered resources have been consumed or freed.
+ void notifyReservedOrReleasedBuffers(const InstRef &IR, bool Reserved) const;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_EXECUTE_STAGE_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/InstructionTables.h b/contrib/llvm/include/llvm/MCA/Stages/InstructionTables.h
new file mode 100644
index 000000000000..34e338f0ce6b
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/InstructionTables.h
@@ -0,0 +1,46 @@
+//===--------------------- InstructionTables.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements a custom stage to generate instruction tables.
+/// See the description of command-line flag -instruction-tables in
+/// docs/CommandGuide/llvm-mca.rst
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_INSTRUCTIONTABLES_H
+#define LLVM_MCA_INSTRUCTIONTABLES_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/Stage.h"
+#include "llvm/MCA/Support.h"
+
+namespace llvm {
+namespace mca {
+
+class InstructionTables final : public Stage {
+ const MCSchedModel &SM;
+ SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> UsedResources;
+ SmallVector<uint64_t, 8> Masks;
+
+public:
+ InstructionTables(const MCSchedModel &Model)
+ : Stage(), SM(Model), Masks(Model.getNumProcResourceKinds()) {
+ computeProcResourceMasks(Model, Masks);
+ }
+
+ bool hasWorkToComplete() const override { return false; }
+ Error execute(InstRef &IR) override;
+};
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_INSTRUCTIONTABLES_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/RetireStage.h b/contrib/llvm/include/llvm/MCA/Stages/RetireStage.h
new file mode 100644
index 000000000000..2051ce5c86ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/RetireStage.h
@@ -0,0 +1,48 @@
+//===---------------------- RetireStage.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the retire stage of a default instruction pipeline.
+/// The RetireStage represents the process logic that interacts with the
+/// simulated RetireControlUnit hardware.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_RETIRE_STAGE_H
+#define LLVM_MCA_RETIRE_STAGE_H
+
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+class RetireStage final : public Stage {
+ // Owner will go away when we move listeners/eventing to the stages.
+ RetireControlUnit &RCU;
+ RegisterFile &PRF;
+
+ RetireStage(const RetireStage &Other) = delete;
+ RetireStage &operator=(const RetireStage &Other) = delete;
+
+public:
+ RetireStage(RetireControlUnit &R, RegisterFile &F)
+ : Stage(), RCU(R), PRF(F) {}
+
+ bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
+ Error cycleStart() override;
+ Error execute(InstRef &IR) override;
+ void notifyInstructionRetired(const InstRef &IR) const;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_RETIRE_STAGE_H
diff --git a/contrib/llvm/include/llvm/MCA/Stages/Stage.h b/contrib/llvm/include/llvm/MCA/Stages/Stage.h
new file mode 100644
index 000000000000..fc7ab569bb0f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Stages/Stage.h
@@ -0,0 +1,88 @@
+//===---------------------- Stage.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a stage.
+/// A chain of stages compose an instruction pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_STAGE_H
+#define LLVM_MCA_STAGE_H
+
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Error.h"
+#include <set>
+
+namespace llvm {
+namespace mca {
+
+class InstRef;
+
+class Stage {
+ Stage *NextInSequence;
+ std::set<HWEventListener *> Listeners;
+
+ Stage(const Stage &Other) = delete;
+ Stage &operator=(const Stage &Other) = delete;
+
+protected:
+ const std::set<HWEventListener *> &getListeners() const { return Listeners; }
+
+public:
+ Stage() : NextInSequence(nullptr) {}
+ virtual ~Stage();
+
+ /// Returns true if it can execute IR during this cycle.
+ virtual bool isAvailable(const InstRef &IR) const { return true; }
+
+ /// Returns true if some instructions are still executing this stage.
+ virtual bool hasWorkToComplete() const = 0;
+
+ /// Called once at the start of each cycle. This can be used as a setup
+ /// phase to prepare for the executions during the cycle.
+ virtual Error cycleStart() { return ErrorSuccess(); }
+
+ /// Called once at the end of each cycle.
+ virtual Error cycleEnd() { return ErrorSuccess(); }
+
+ /// The primary action that this stage performs on instruction IR.
+ virtual Error execute(InstRef &IR) = 0;
+
+ void setNextInSequence(Stage *NextStage) {
+ assert(!NextInSequence && "This stage already has a NextInSequence!");
+ NextInSequence = NextStage;
+ }
+
+ bool checkNextStage(const InstRef &IR) const {
+ return NextInSequence && NextInSequence->isAvailable(IR);
+ }
+
+  /// Called when an instruction is ready to move to the next pipeline stage.
+ ///
+ /// Stages are responsible for moving instructions to their immediate
+ /// successor stages.
+ Error moveToTheNextStage(InstRef &IR) {
+ assert(checkNextStage(IR) && "Next stage is not ready!");
+ return NextInSequence->execute(IR);
+ }
+
+ /// Add a listener to receive callbacks during the execution of this stage.
+ void addListener(HWEventListener *Listener);
+
+ /// Notify listeners of a particular hardware event.
+ template <typename EventT> void notifyEvent(const EventT &Event) const {
+ for (HWEventListener *Listener : Listeners)
+ Listener->onEvent(Event);
+ }
+};
+
+} // namespace mca
+} // namespace llvm
+#endif // LLVM_MCA_STAGE_H
diff --git a/contrib/llvm/include/llvm/MCA/Support.h b/contrib/llvm/include/llvm/MCA/Support.h
new file mode 100644
index 000000000000..7b0c5bf3a486
--- /dev/null
+++ b/contrib/llvm/include/llvm/MCA/Support.h
@@ -0,0 +1,119 @@
+//===--------------------- Support.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Helper functions used by various pipeline components.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MCA_SUPPORT_H
+#define LLVM_MCA_SUPPORT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace mca {
+
+template <typename T>
+class InstructionError : public ErrorInfo<InstructionError<T>> {
+public:
+ static char ID;
+ std::string Message;
+ const T &Inst;
+
+ InstructionError(std::string M, const T &MCI)
+ : Message(std::move(M)), Inst(MCI) {}
+
+ void log(raw_ostream &OS) const override { OS << Message; }
+
+ std::error_code convertToErrorCode() const override {
+ return inconvertibleErrorCode();
+ }
+};
+
+template <typename T> char InstructionError<T>::ID;
+
+/// This class represents the number of cycles per resource (fractions of
+/// cycles). That quantity is managed here as a ratio, and accessed via the
+/// double cast-operator below. The two quantities, number of cycles and
+/// number of resources, are kept separate. This is used by the
+/// ResourcePressureView to calculate the average resource cycles
+/// per instruction/iteration.
+class ResourceCycles {
+ unsigned Numerator, Denominator;
+
+public:
+ ResourceCycles() : Numerator(0), Denominator(1) {}
+ ResourceCycles(unsigned Cycles, unsigned ResourceUnits = 1)
+ : Numerator(Cycles), Denominator(ResourceUnits) {}
+
+ operator double() const {
+ assert(Denominator && "Invalid denominator (must be non-zero).");
+ return (Denominator == 1) ? Numerator : (double)Numerator / Denominator;
+ }
+
+ // Add the components of RHS to this instance. Instead of calculating
+ // the final value here, we keep track of the numerator and denominator
+ // separately, to reduce floating point error.
+ ResourceCycles &operator+=(const ResourceCycles &RHS) {
+ if (Denominator == RHS.Denominator)
+ Numerator += RHS.Numerator;
+ else {
+ // Create a common denominator for LHS and RHS by calculating the least
+ // common multiple from the GCD.
+ unsigned GCD = GreatestCommonDivisor64(Denominator, RHS.Denominator);
+ unsigned LCM = (Denominator * RHS.Denominator) / GCD;
+ unsigned LHSNumerator = Numerator * (LCM / Denominator);
+ unsigned RHSNumerator = RHS.Numerator * (LCM / RHS.Denominator);
+ Numerator = LHSNumerator + RHSNumerator;
+ Denominator = LCM;
+ }
+ return *this;
+ }
+};
+
+/// Populates vector Masks with processor resource masks.
+///
+/// The number of bits set in a mask depends on the processor resource type.
+/// Each processor resource mask has at least one bit set. For groups, the
+/// number of bits set in the mask is equal to the cardinality of the group plus
+/// one. Excluding the most significant bit, the remaining bits in the mask
+/// identify processor resources that are part of the group.
+///
+/// Example:
+///
+/// ResourceA -- Mask: 0b001
+/// ResourceB -- Mask: 0b010
+/// ResourceAB -- Mask: 0b100 U (ResourceA::Mask | ResourceB::Mask) == 0b111
+///
+/// ResourceAB is a processor resource group containing ResourceA and ResourceB.
+/// Each resource mask uniquely identifies a resource; both ResourceA and
+/// ResourceB only have one bit set.
+/// ResourceAB is a group; excluding the most significant bit in the mask, the
+/// remaining bits identify the composition of the group.
+///
+/// Resource masks are used by the ResourceManager to solve set membership
+/// problems with simple bit manipulation operations.
+void computeProcResourceMasks(const MCSchedModel &SM,
+ MutableArrayRef<uint64_t> Masks);
+
+/// Compute the reciprocal block throughput from a set of processor resource
+/// cycles. The reciprocal block throughput is computed as the MAX between:
+/// - NumMicroOps / DispatchWidth
+/// - ProcResourceCycles / #ProcResourceUnits (for every consumed resource).
+double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
+ unsigned NumMicroOps,
+ ArrayRef<unsigned> ProcResourceUsage);
+} // namespace mca
+} // namespace llvm
+
+#endif // LLVM_MCA_SUPPORT_H
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 6caadea0175b..b753d261a0fc 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -594,6 +594,8 @@ enum class coff_guard_flags : uint32_t {
FidTableHasFlags = 0x10000000, // Indicates that fid tables are 5 bytes
};
+enum class frame_type : uint16_t { Fpo = 0, Trap = 1, Tss = 2, NonFpo = 3 };
+
struct coff_load_config_code_integrity {
support::ulittle16_t Flags;
support::ulittle16_t Catalog;
@@ -883,6 +885,7 @@ public:
assert(is64());
return reinterpret_cast<const coff_load_configuration64 *>(LoadConfig);
}
+ StringRef getRelocationTypeName(uint16_t Type) const;
protected:
void moveSymbolNext(DataRefImpl &Symb) const override;
@@ -968,6 +971,9 @@ public:
return nullptr;
return reinterpret_cast<const dos_header *>(base());
}
+ std::error_code getCOFFHeader(const coff_file_header *&Res) const;
+ std::error_code
+ getCOFFBigObjHeader(const coff_bigobj_file_header *&Res) const;
std::error_code getPE32Header(const pe32_header *&Res) const;
std::error_code getPE32PlusHeader(const pe32plus_header *&Res) const;
std::error_code getDataDirectory(uint32_t index,
@@ -1016,6 +1022,8 @@ public:
ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;
+ uint32_t getSymbolIndex(COFFSymbolRef Symbol) const;
+
size_t getSymbolTableEntrySize() const {
if (COFFHeader)
return sizeof(coff_symbol16);
@@ -1059,6 +1067,8 @@ public:
bool isRelocatableObject() const override;
bool is64() const { return PE32PlusHeader; }
+ StringRef mapDebugSectionName(StringRef Name) const override;
+
static bool classof(const Binary *v) { return v->isCOFF(); }
};
@@ -1227,7 +1237,7 @@ struct FpoData {
bool useBP() const { return (Attributes >> 10) & 1; }
// cbFrame: frame pointer
- int getFP() const { return Attributes >> 14; }
+ frame_type getFP() const { return static_cast<frame_type>(Attributes >> 14); }
};
} // end namespace object
diff --git a/contrib/llvm/include/llvm/Object/ELF.h b/contrib/llvm/include/llvm/Object/ELF.h
index 752d468fd25b..bcdc190cc7dc 100644
--- a/contrib/llvm/include/llvm/Object/ELF.h
+++ b/contrib/llvm/include/llvm/Object/ELF.h
@@ -32,7 +32,7 @@ namespace llvm {
namespace object {
StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
-uint32_t getELFRelrRelocationType(uint32_t Machine);
+uint32_t getELFRelativeRelocationType(uint32_t Machine);
StringRef getELFSectionTypeName(uint32_t Machine, uint32_t Type);
// Subclasses of ELFFile may need this for template instantiation
@@ -113,7 +113,7 @@ public:
StringRef getRelocationTypeName(uint32_t Type) const;
void getRelocationTypeName(uint32_t Type,
SmallVectorImpl<char> &Result) const;
- uint32_t getRelrRelocationType() const;
+ uint32_t getRelativeRelocationType() const;
const char *getDynamicTagAsString(unsigned Arch, uint64_t Type) const;
const char *getDynamicTagAsString(uint64_t Type) const;
@@ -415,8 +415,8 @@ void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
}
template <class ELFT>
-uint32_t ELFFile<ELFT>::getRelrRelocationType() const {
- return getELFRelrRelocationType(getHeader()->e_machine);
+uint32_t ELFFile<ELFT>::getRelativeRelocationType() const {
+ return getELFRelativeRelocationType(getHeader()->e_machine);
}
template <class ELFT>
diff --git a/contrib/llvm/include/llvm/Object/ELFObjectFile.h b/contrib/llvm/include/llvm/Object/ELFObjectFile.h
index 2c0905d545a7..0f620681cd99 100644
--- a/contrib/llvm/include/llvm/Object/ELFObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ELFObjectFile.h
@@ -86,6 +86,8 @@ public:
void setARMSubArch(Triple &TheTriple) const override;
virtual uint16_t getEType() const = 0;
+
+ std::vector<std::pair<DataRefImpl, uint64_t>> getPltAddresses() const;
};
class ELFSectionRef : public SectionRef {
@@ -258,6 +260,8 @@ protected:
bool isSectionData(DataRefImpl Sec) const override;
bool isSectionBSS(DataRefImpl Sec) const override;
bool isSectionVirtual(DataRefImpl Sec) const override;
+ bool isBerkeleyText(DataRefImpl Sec) const override;
+ bool isBerkeleyData(DataRefImpl Sec) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
std::vector<SectionRef> dynamic_relocation_sections() const override;
@@ -331,9 +335,10 @@ protected:
// A symbol is exported if its binding is either GLOBAL or WEAK, and its
// visibility is either DEFAULT or PROTECTED. All other symbols are not
// exported.
- return ((Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK) &&
- (Visibility == ELF::STV_DEFAULT ||
- Visibility == ELF::STV_PROTECTED));
+ return (
+ (Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK ||
+ Binding == ELF::STB_GNU_UNIQUE) &&
+ (Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_PROTECTED));
}
// This flag is used for classof, to distinguish ELFObjectFile from
@@ -757,6 +762,20 @@ bool ELFObjectFile<ELFT>::isSectionVirtual(DataRefImpl Sec) const {
}
template <class ELFT>
+bool ELFObjectFile<ELFT>::isBerkeleyText(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_flags & ELF::SHF_ALLOC &&
+ (getSection(Sec)->sh_flags & ELF::SHF_EXECINSTR ||
+ !(getSection(Sec)->sh_flags & ELF::SHF_WRITE));
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isBerkeleyData(DataRefImpl Sec) const {
+ const Elf_Shdr *EShdr = getSection(Sec);
+ return !isBerkeleyText(Sec) && EShdr->sh_type != ELF::SHT_NOBITS &&
+ EShdr->sh_flags & ELF::SHF_ALLOC;
+}
+
+template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
DataRefImpl RelData;
@@ -1019,6 +1038,8 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
return "ELF32-lanai";
case ELF::EM_MIPS:
return "ELF32-mips";
+ case ELF::EM_MSP430:
+ return "ELF32-msp430";
case ELF::EM_PPC:
return "ELF32-ppc";
case ELF::EM_RISCV:
@@ -1089,6 +1110,8 @@ template <class ELFT> Triple::ArchType ELFObjectFile<ELFT>::getArch() const {
default:
report_fatal_error("Invalid ELFCLASS!");
}
+ case ELF::EM_MSP430:
+ return Triple::msp430;
case ELF::EM_PPC:
return Triple::ppc;
case ELF::EM_PPC64:
diff --git a/contrib/llvm/include/llvm/Object/ELFTypes.h b/contrib/llvm/include/llvm/Object/ELFTypes.h
index fb386120e34d..ec3c8e7bae46 100644
--- a/contrib/llvm/include/llvm/Object/ELFTypes.h
+++ b/contrib/llvm/include/llvm/Object/ELFTypes.h
@@ -605,13 +605,12 @@ public:
}
/// Get the note's descriptor.
- ArrayRef<Elf_Word> getDesc() const {
+ ArrayRef<uint8_t> getDesc() const {
if (!Nhdr.n_descsz)
- return ArrayRef<Elf_Word>();
- return ArrayRef<Elf_Word>(
- reinterpret_cast<const Elf_Word *>(
- reinterpret_cast<const uint8_t *>(&Nhdr) + sizeof(Nhdr) +
- alignTo<Elf_Nhdr_Impl<ELFT>::Align>(Nhdr.n_namesz)),
+ return ArrayRef<uint8_t>();
+ return ArrayRef<uint8_t>(
+ reinterpret_cast<const uint8_t *>(&Nhdr) + sizeof(Nhdr) +
+ alignTo<Elf_Nhdr_Impl<ELFT>::Align>(Nhdr.n_namesz),
Nhdr.n_descsz);
}
@@ -643,14 +642,19 @@ class Elf_Note_Iterator_Impl
// container, either cleanly or with an overflow error.
void advanceNhdr(const uint8_t *NhdrPos, size_t NoteSize) {
RemainingSize -= NoteSize;
- if (RemainingSize == 0u)
+ if (RemainingSize == 0u) {
+ // Ensure that if the iterator walks to the end, the error is checked
+ // afterwards.
+ *Err = Error::success();
Nhdr = nullptr;
- else if (sizeof(*Nhdr) > RemainingSize)
+ } else if (sizeof(*Nhdr) > RemainingSize)
stopWithOverflowError();
else {
Nhdr = reinterpret_cast<const Elf_Nhdr_Impl<ELFT> *>(NhdrPos + NoteSize);
if (Nhdr->getSize() > RemainingSize)
stopWithOverflowError();
+ else
+ *Err = Error::success();
}
}
@@ -658,6 +662,7 @@ class Elf_Note_Iterator_Impl
explicit Elf_Note_Iterator_Impl(Error &Err) : Err(&Err) {}
Elf_Note_Iterator_Impl(const uint8_t *Start, size_t Size, Error &Err)
: RemainingSize(Size), Err(&Err) {
+ consumeError(std::move(Err));
assert(Start && "ELF note iterator starting at NULL");
advanceNhdr(Start, 0u);
}
@@ -671,6 +676,10 @@ public:
return *this;
}
bool operator==(Elf_Note_Iterator_Impl Other) const {
+ if (!Nhdr && Other.Err)
+ (void)(bool)(*Other.Err);
+ if (!Other.Nhdr && Err)
+ (void)(bool)(*Err);
return Nhdr == Other.Nhdr;
}
bool operator!=(Elf_Note_Iterator_Impl Other) const {
diff --git a/contrib/llvm/include/llvm/Object/Error.h b/contrib/llvm/include/llvm/Object/Error.h
index eb938338715d..a15f8b9236eb 100644
--- a/contrib/llvm/include/llvm/Object/Error.h
+++ b/contrib/llvm/include/llvm/Object/Error.h
@@ -50,6 +50,7 @@ inline std::error_code make_error_code(object_error e) {
/// Currently inherits from ECError for easy interoperability with
/// std::error_code, but this will be removed in the future.
class BinaryError : public ErrorInfo<BinaryError, ECError> {
+ virtual void anchor();
public:
static char ID;
BinaryError() {
diff --git a/contrib/llvm/include/llvm/Object/MachO.h b/contrib/llvm/include/llvm/Object/MachO.h
index 159c1765ab86..c2f4f4062934 100644
--- a/contrib/llvm/include/llvm/Object/MachO.h
+++ b/contrib/llvm/include/llvm/Object/MachO.h
@@ -356,7 +356,7 @@ public:
basic_symbol_iterator symbol_end() const override;
// MachO specific.
- basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
+ symbol_iterator getSymbolByIndex(unsigned Index) const;
uint64_t getSymbolIndex(DataRefImpl Symb) const;
section_iterator section_begin() const override;
@@ -616,6 +616,9 @@ public:
case MachO::PLATFORM_TVOS: return "tvos";
case MachO::PLATFORM_WATCHOS: return "watchos";
case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
+ case MachO::PLATFORM_IOSSIMULATOR: return "iossimulator";
+ case MachO::PLATFORM_TVOSSIMULATOR: return "tvossimulator";
+ case MachO::PLATFORM_WATCHOSSIMULATOR: return "watchossimulator";
default:
std::string ret;
raw_string_ostream ss(ret);
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
index 02d62e8e4879..036c99cb6baf 100644
--- a/contrib/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -104,13 +104,25 @@ public:
uint64_t getAlignment() const;
bool isCompressed() const;
+ /// Whether this section contains instructions.
bool isText() const;
+ /// Whether this section contains data, not instructions.
bool isData() const;
+ /// Whether this section contains BSS uninitialized data.
bool isBSS() const;
bool isVirtual() const;
bool isBitcode() const;
bool isStripped() const;
+ /// Whether this section will be placed in the text segment, according to the
+ /// Berkeley size format. This is true if the section is allocatable, and
+ /// contains either code or readonly data.
+ bool isBerkeleyText() const;
+ /// Whether this section will be placed in the data segment, according to the
+ /// Berkeley size format. This is true if the section is allocatable and
+ /// contains data (e.g. PROGBITS), but is not text.
+ bool isBerkeleyData() const;
+
bool containsSymbol(SymbolRef S) const;
relocation_iterator relocation_begin() const;
@@ -238,6 +250,8 @@ protected:
virtual bool isSectionVirtual(DataRefImpl Sec) const = 0;
virtual bool isSectionBitcode(DataRefImpl Sec) const;
virtual bool isSectionStripped(DataRefImpl Sec) const;
+ virtual bool isBerkeleyText(DataRefImpl Sec) const;
+ virtual bool isBerkeleyData(DataRefImpl Sec) const;
virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
virtual section_iterator getRelocatedSection(DataRefImpl Sec) const;
@@ -449,6 +463,14 @@ inline bool SectionRef::isStripped() const {
return OwningObject->isSectionStripped(SectionPimpl);
}
+inline bool SectionRef::isBerkeleyText() const {
+ return OwningObject->isBerkeleyText(SectionPimpl);
+}
+
+inline bool SectionRef::isBerkeleyData() const {
+ return OwningObject->isBerkeleyData(SectionPimpl);
+}
+
inline relocation_iterator SectionRef::relocation_begin() const {
return OwningObject->section_rel_begin(SectionPimpl);
}
diff --git a/contrib/llvm/include/llvm/Object/RelocVisitor.h b/contrib/llvm/include/llvm/Object/RelocVisitor.h
index 008e109f6679..9a978de2e599 100644
--- a/contrib/llvm/include/llvm/Object/RelocVisitor.h
+++ b/contrib/llvm/include/llvm/Object/RelocVisitor.h
@@ -129,6 +129,8 @@ private:
case ELF::R_X86_64_NONE:
return 0;
case ELF::R_X86_64_64:
+ case ELF::R_X86_64_DTPOFF32:
+ case ELF::R_X86_64_DTPOFF64:
return Value + getELFAddend(R);
case ELF::R_X86_64_PC32:
return Value + getELFAddend(R) - R.getOffset();
@@ -333,6 +335,7 @@ private:
case wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB:
case wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32:
case wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32:
+ case wasm::R_WEBASSEMBLY_EVENT_INDEX_LEB:
// For wasm section, its offset at 0 -- ignoring Value
return 0;
}
diff --git a/contrib/llvm/include/llvm/Object/Wasm.h b/contrib/llvm/include/llvm/Object/Wasm.h
index fd34e45feb62..ed857652a048 100644
--- a/contrib/llvm/include/llvm/Object/Wasm.h
+++ b/contrib/llvm/include/llvm/Object/Wasm.h
@@ -18,10 +18,11 @@
#define LLVM_OBJECT_WASM_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/MC/MCSymbolWasm.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
@@ -36,13 +37,16 @@ namespace object {
class WasmSymbol {
public:
WasmSymbol(const wasm::WasmSymbolInfo &Info,
- const wasm::WasmSignature *FunctionType,
- const wasm::WasmGlobalType *GlobalType)
- : Info(Info), FunctionType(FunctionType), GlobalType(GlobalType) {}
+ const wasm::WasmGlobalType *GlobalType,
+ const wasm::WasmEventType *EventType,
+ const wasm::WasmSignature *Signature)
+ : Info(Info), GlobalType(GlobalType), EventType(EventType),
+ Signature(Signature) {}
const wasm::WasmSymbolInfo &Info;
- const wasm::WasmSignature *FunctionType;
const wasm::WasmGlobalType *GlobalType;
+ const wasm::WasmEventType *EventType;
+ const wasm::WasmSignature *Signature;
bool isTypeFunction() const {
return Info.Kind == wasm::WASM_SYMBOL_TYPE_FUNCTION;
@@ -58,6 +62,8 @@ public:
return Info.Kind == wasm::WASM_SYMBOL_TYPE_SECTION;
}
+ bool isTypeEvent() const { return Info.Kind == wasm::WASM_SYMBOL_TYPE_EVENT; }
+
bool isDefined() const { return !isUndefined(); }
bool isUndefined() const {
@@ -98,9 +104,9 @@ public:
struct WasmSection {
WasmSection() = default;
- uint32_t Type = 0; // Section type (See below)
- uint32_t Offset = 0; // Offset with in the file
- StringRef Name; // Section name (User-defined sections only)
+ uint32_t Type = 0; // Section type (See below)
+ uint32_t Offset = 0; // Offset with in the file
+ StringRef Name; // Section name (User-defined sections only)
ArrayRef<uint8_t> Content; // Section content
std::vector<wasm::WasmRelocation> Relocations; // Relocations for this section
};
@@ -119,19 +125,21 @@ public:
const WasmSymbol &getWasmSymbol(const DataRefImpl &Symb) const;
const WasmSymbol &getWasmSymbol(const SymbolRef &Symbol) const;
const WasmSection &getWasmSection(const SectionRef &Section) const;
- const wasm::WasmRelocation &getWasmRelocation(const RelocationRef& Ref) const;
+ const wasm::WasmRelocation &getWasmRelocation(const RelocationRef &Ref) const;
static bool classof(const Binary *v) { return v->isWasm(); }
+ const wasm::WasmDylinkInfo &dylinkInfo() const { return DylinkInfo; }
ArrayRef<wasm::WasmSignature> types() const { return Signatures; }
ArrayRef<uint32_t> functionTypes() const { return FunctionTypes; }
ArrayRef<wasm::WasmImport> imports() const { return Imports; }
ArrayRef<wasm::WasmTable> tables() const { return Tables; }
ArrayRef<wasm::WasmLimits> memories() const { return Memories; }
ArrayRef<wasm::WasmGlobal> globals() const { return Globals; }
+ ArrayRef<wasm::WasmEvent> events() const { return Events; }
ArrayRef<wasm::WasmExport> exports() const { return Exports; }
ArrayRef<WasmSymbol> syms() const { return Symbols; }
- const wasm::WasmLinkingData& linkingData() const { return LinkingData; }
+ const wasm::WasmLinkingData &linkingData() const { return LinkingData; }
uint32_t getNumberOfSymbols() const { return Symbols.size(); }
ArrayRef<wasm::WasmElemSegment> elements() const { return ElemSegments; }
ArrayRef<WasmSegment> dataSegments() const { return DataSegments; }
@@ -140,6 +148,7 @@ public:
uint32_t startFunction() const { return StartFunction; }
uint32_t getNumImportedGlobals() const { return NumImportedGlobals; }
uint32_t getNumImportedFunctions() const { return NumImportedFunctions; }
+ uint32_t getNumImportedEvents() const { return NumImportedEvents; }
void moveSymbolNext(DataRefImpl &Symb) const override;
@@ -151,7 +160,7 @@ public:
Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
- uint64_t getWasmSymbolValue(const WasmSymbol& Sym) const;
+ uint64_t getWasmSymbolValue(const WasmSymbol &Sym) const;
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
@@ -192,6 +201,7 @@ public:
Triple::ArchType getArch() const override;
SubtargetFeatures getFeatures() const override;
bool isRelocatableObject() const override;
+ bool isSharedObject() const;
struct ReadContext {
const uint8_t *Start;
@@ -204,12 +214,16 @@ private:
bool isDefinedFunctionIndex(uint32_t Index) const;
bool isValidGlobalIndex(uint32_t Index) const;
bool isDefinedGlobalIndex(uint32_t Index) const;
+ bool isValidEventIndex(uint32_t Index) const;
+ bool isDefinedEventIndex(uint32_t Index) const;
bool isValidFunctionSymbol(uint32_t Index) const;
bool isValidGlobalSymbol(uint32_t Index) const;
+ bool isValidEventSymbol(uint32_t Index) const;
bool isValidDataSymbol(uint32_t Index) const;
bool isValidSectionSymbol(uint32_t Index) const;
wasm::WasmFunction &getDefinedFunction(uint32_t Index);
wasm::WasmGlobal &getDefinedGlobal(uint32_t Index);
+ wasm::WasmEvent &getDefinedEvent(uint32_t Index);
const WasmSection &getWasmSection(DataRefImpl Ref) const;
const wasm::WasmRelocation &getWasmRelocation(DataRefImpl Ref) const;
@@ -225,6 +239,7 @@ private:
Error parseTableSection(ReadContext &Ctx);
Error parseMemorySection(ReadContext &Ctx);
Error parseGlobalSection(ReadContext &Ctx);
+ Error parseEventSection(ReadContext &Ctx);
Error parseExportSection(ReadContext &Ctx);
Error parseStartSection(ReadContext &Ctx);
Error parseElemSection(ReadContext &Ctx);
@@ -232,6 +247,7 @@ private:
Error parseDataSection(ReadContext &Ctx);
// Custom section types
+ Error parseDylinkSection(ReadContext &Ctx);
Error parseNameSection(ReadContext &Ctx);
Error parseLinkingSection(ReadContext &Ctx);
Error parseLinkingSectionSymtab(ReadContext &Ctx);
@@ -240,11 +256,13 @@ private:
wasm::WasmObjectHeader Header;
std::vector<WasmSection> Sections;
+ wasm::WasmDylinkInfo DylinkInfo;
std::vector<wasm::WasmSignature> Signatures;
std::vector<uint32_t> FunctionTypes;
std::vector<wasm::WasmTable> Tables;
std::vector<wasm::WasmLimits> Memories;
std::vector<wasm::WasmGlobal> Globals;
+ std::vector<wasm::WasmEvent> Events;
std::vector<wasm::WasmImport> Imports;
std::vector<wasm::WasmExport> Exports;
std::vector<wasm::WasmElemSegment> ElemSegments;
@@ -254,18 +272,63 @@ private:
std::vector<wasm::WasmFunctionName> DebugNames;
uint32_t StartFunction = -1;
bool HasLinkingSection = false;
+ bool HasDylinkSection = false;
wasm::WasmLinkingData LinkingData;
uint32_t NumImportedGlobals = 0;
uint32_t NumImportedFunctions = 0;
+ uint32_t NumImportedEvents = 0;
uint32_t CodeSection = 0;
uint32_t DataSection = 0;
uint32_t GlobalSection = 0;
+ uint32_t EventSection = 0;
+};
+
+class WasmSectionOrderChecker {
+public:
+ // We define orders for all core wasm sections and known custom sections.
+ enum : int {
+ // Core sections
+ // The order of standard sections is precisely given by the spec.
+ WASM_SEC_ORDER_TYPE = 1,
+ WASM_SEC_ORDER_IMPORT = 2,
+ WASM_SEC_ORDER_FUNCTION = 3,
+ WASM_SEC_ORDER_TABLE = 4,
+ WASM_SEC_ORDER_MEMORY = 5,
+ WASM_SEC_ORDER_GLOBAL = 6,
+ WASM_SEC_ORDER_EVENT = 7,
+ WASM_SEC_ORDER_EXPORT = 8,
+ WASM_SEC_ORDER_START = 9,
+ WASM_SEC_ORDER_ELEM = 10,
+ WASM_SEC_ORDER_DATACOUNT = 11,
+ WASM_SEC_ORDER_CODE = 12,
+ WASM_SEC_ORDER_DATA = 13,
+
+ // Custom sections
+ // "dylink" should be the very first section in the module
+ WASM_SEC_ORDER_DYLINK = 0,
+ // "linking" section requires DATA section in order to validate data symbols
+ WASM_SEC_ORDER_LINKING = 100,
+ // Must come after "linking" section in order to validate reloc indexes.
+ WASM_SEC_ORDER_RELOC = 101,
+ // "name" section must appear after DATA. Comes after "linking" to allow
+ // symbol table to set default function name.
+ WASM_SEC_ORDER_NAME = 102,
+ // "producers" section must appear after "name" section.
+ WASM_SEC_ORDER_PRODUCERS = 103
+ };
+
+ bool isValidSectionOrder(unsigned ID, StringRef CustomSectionName = "");
+
+private:
+ int LastOrder = -1; // Lastly seen known section's order
+
+ // Returns -1 for unknown sections.
+ int getSectionOrder(unsigned ID, StringRef CustomSectionName = "");
};
} // end namespace object
-inline raw_ostream &operator<<(raw_ostream &OS,
- const object::WasmSymbol &Sym) {
+inline raw_ostream &operator<<(raw_ostream &OS, const object::WasmSymbol &Sym) {
Sym.print(OS);
return OS;
}
diff --git a/contrib/llvm/include/llvm/Object/WasmTraits.h b/contrib/llvm/include/llvm/Object/WasmTraits.h
index ebcd00b15227..049d72f79e41 100644
--- a/contrib/llvm/include/llvm/Object/WasmTraits.h
+++ b/contrib/llvm/include/llvm/Object/WasmTraits.h
@@ -24,14 +24,20 @@ template <typename T> struct DenseMapInfo;
// Traits for using WasmSignature in a DenseMap.
template <> struct DenseMapInfo<wasm::WasmSignature> {
static wasm::WasmSignature getEmptyKey() {
- return wasm::WasmSignature{{}, 1};
+ wasm::WasmSignature Sig;
+ Sig.State = wasm::WasmSignature::Empty;
+ return Sig;
}
static wasm::WasmSignature getTombstoneKey() {
- return wasm::WasmSignature{{}, 2};
+ wasm::WasmSignature Sig;
+ Sig.State = wasm::WasmSignature::Tombstone;
+ return Sig;
}
static unsigned getHashValue(const wasm::WasmSignature &Sig) {
- unsigned H = hash_value(Sig.ReturnType);
- for (int32_t Param : Sig.ParamTypes)
+ uintptr_t H = hash_value(Sig.State);
+ for (auto Ret : Sig.Returns)
+ H = hash_combine(H, Ret);
+ for (auto Param : Sig.Params)
H = hash_combine(H, Param);
return H;
}
diff --git a/contrib/llvm/include/llvm/ObjectYAML/COFFYAML.h b/contrib/llvm/include/llvm/ObjectYAML/COFFYAML.h
index 78f021fc0386..253c627dd683 100644
--- a/contrib/llvm/include/llvm/ObjectYAML/COFFYAML.h
+++ b/contrib/llvm/include/llvm/ObjectYAML/COFFYAML.h
@@ -58,7 +58,13 @@ LLVM_YAML_STRONG_TYPEDEF(uint8_t, AuxSymbolType)
struct Relocation {
uint32_t VirtualAddress;
uint16_t Type;
+
+ // Normally a Relocation can refer to the symbol via its name.
+ // It can also use a direct symbol table index instead (with no name
+ // specified), allowing disambiguating between multiple symbols with the
+ // same name or crafting intentionally broken files for testing.
StringRef SymbolName;
+ Optional<uint32_t> SymbolTableIndex;
};
struct Section {
diff --git a/contrib/llvm/include/llvm/ObjectYAML/ELFYAML.h b/contrib/llvm/include/llvm/ObjectYAML/ELFYAML.h
index 6fc69735f1c7..f2b0c35521f0 100644
--- a/contrib/llvm/include/llvm/ObjectYAML/ELFYAML.h
+++ b/contrib/llvm/include/llvm/ObjectYAML/ELFYAML.h
@@ -68,6 +68,7 @@ struct FileHeader {
ELF_ELFCLASS Class;
ELF_ELFDATA Data;
ELF_ELFOSABI OSABI;
+ llvm::yaml::Hex8 ABIVersion;
ELF_ET Type;
ELF_EM Machine;
ELF_EF Flags;
@@ -123,6 +124,7 @@ struct Section {
StringRef Link;
StringRef Info;
llvm::yaml::Hex64 AddressAlign;
+ Optional<llvm::yaml::Hex64> EntSize;
Section(SectionKind Kind) : Kind(Kind) {}
virtual ~Section();
diff --git a/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h b/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
index 8cd08e520560..406dd7cb515f 100644
--- a/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
+++ b/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
@@ -74,6 +74,12 @@ struct Global {
wasm::WasmInitExpr InitExpr;
};
+struct Event {
+ uint32_t Index;
+ uint32_t Attribute;
+ uint32_t SigIndex;
+};
+
struct Import {
StringRef Module;
StringRef Field;
@@ -83,6 +89,7 @@ struct Import {
Global GlobalImport;
Table TableImport;
Limits Memory;
+ Event EventImport;
};
};
@@ -176,6 +183,21 @@ struct CustomSection : Section {
yaml::BinaryRef Payload;
};
+struct DylinkSection : CustomSection {
+ DylinkSection() : CustomSection("dylink") {}
+
+ static bool classof(const Section *S) {
+ auto C = dyn_cast<CustomSection>(S);
+ return C && C->Name == "dylink";
+ }
+
+ uint32_t MemorySize;
+ uint32_t MemoryAlignment;
+ uint32_t TableSize;
+ uint32_t TableAlignment;
+ std::vector<StringRef> Needed;
+};
+
struct NameSection : CustomSection {
NameSection() : CustomSection("name") {}
@@ -262,6 +284,16 @@ struct GlobalSection : Section {
std::vector<Global> Globals;
};
+struct EventSection : Section {
+ EventSection() : Section(wasm::WASM_SEC_EVENT) {}
+
+ static bool classof(const Section *S) {
+ return S->Type == wasm::WASM_SEC_EVENT;
+ }
+
+ std::vector<Event> Events;
+};
+
struct ExportSection : Section {
ExportSection() : Section(wasm::WASM_SEC_EXPORT) {}
@@ -339,6 +371,7 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::SymbolInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::InitFunction)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ComdatEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Comdat)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Event)
namespace llvm {
namespace yaml {
@@ -471,6 +504,10 @@ template <> struct ScalarEnumerationTraits<WasmYAML::RelocType> {
static void enumeration(IO &IO, WasmYAML::RelocType &Kind);
};
+template <> struct MappingTraits<WasmYAML::Event> {
+ static void mapping(IO &IO, WasmYAML::Event &Event);
+};
+
} // end namespace yaml
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Option/OptTable.h b/contrib/llvm/include/llvm/Option/OptTable.h
index 743c4772c98c..fdb05d8a15af 100644
--- a/contrib/llvm/include/llvm/Option/OptTable.h
+++ b/contrib/llvm/include/llvm/Option/OptTable.h
@@ -217,8 +217,8 @@ public:
/// Render the help text for an option table.
///
/// \param OS - The stream to write the help text to.
- /// \param Name - The name to use in the usage line.
- /// \param Title - The title to use in the usage line.
+ /// \param Usage - USAGE: Usage
+ /// \param Title - OVERVIEW: Title
/// \param FlagsToInclude - If non-zero, only include options with any
/// of these flags set.
/// \param FlagsToExclude - Exclude options with any of these flags set.
@@ -226,11 +226,11 @@ public:
/// that don't have help texts. By default, we display
/// only options that are not hidden and have help
/// texts.
- void PrintHelp(raw_ostream &OS, const char *Name, const char *Title,
+ void PrintHelp(raw_ostream &OS, const char *Usage, const char *Title,
unsigned FlagsToInclude, unsigned FlagsToExclude,
bool ShowAllAliases) const;
- void PrintHelp(raw_ostream &OS, const char *Name, const char *Title,
+ void PrintHelp(raw_ostream &OS, const char *Usage, const char *Title,
bool ShowHidden = false, bool ShowAllAliases = false) const;
};
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
index d65347d611ea..5935a0853d32 100644
--- a/contrib/llvm/include/llvm/Pass.h
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -356,17 +356,6 @@ protected:
/// This is the storage for the -time-passes option.
extern bool TimePassesIsEnabled;
-/// isFunctionInPrintList - returns true if a function should be printed via
-// debugging options like -print-after-all/-print-before-all.
-// Tells if the function IR should be printed by PrinterPass.
-extern bool isFunctionInPrintList(StringRef FunctionName);
-
-/// forcePrintModuleIR - returns true if IR printing passes should
-// be printing module IR (even for local-pass printers e.g. function-pass)
-// to provide more context, as enabled by debugging option -print-module-scope
-// Tells if IR printer should be printing module IR
-extern bool forcePrintModuleIR();
-
} // end namespace llvm
// Include support files that contain important APIs commonly used by Passes,
diff --git a/contrib/llvm/include/llvm/Passes/PassBuilder.h b/contrib/llvm/include/llvm/Passes/PassBuilder.h
index 24a93bc76af5..fa59345a02cf 100644
--- a/contrib/llvm/include/llvm/Passes/PassBuilder.h
+++ b/contrib/llvm/include/llvm/Passes/PassBuilder.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Error.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>
@@ -32,10 +33,13 @@ class ModuleSummaryIndex;
/// A struct capturing PGO tunables.
struct PGOOptions {
PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
- std::string SampleProfileFile = "", bool RunProfileGen = false,
- bool SamplePGOSupport = false)
+ std::string SampleProfileFile = "",
+ std::string ProfileRemappingFile = "",
+ bool RunProfileGen = false, bool SamplePGOSupport = false)
: ProfileGenFile(ProfileGenFile), ProfileUseFile(ProfileUseFile),
- SampleProfileFile(SampleProfileFile), RunProfileGen(RunProfileGen),
+ SampleProfileFile(SampleProfileFile),
+ ProfileRemappingFile(ProfileRemappingFile),
+ RunProfileGen(RunProfileGen),
SamplePGOSupport(SamplePGOSupport || !SampleProfileFile.empty()) {
assert((RunProfileGen ||
!SampleProfileFile.empty() ||
@@ -45,6 +49,7 @@ struct PGOOptions {
std::string ProfileGenFile;
std::string ProfileUseFile;
std::string SampleProfileFile;
+ std::string ProfileRemappingFile;
bool RunProfileGen;
bool SamplePGOSupport;
};
@@ -58,6 +63,7 @@ struct PGOOptions {
class PassBuilder {
TargetMachine *TM;
Optional<PGOOptions> PGOOpt;
+ PassInstrumentationCallbacks *PIC;
public:
/// A struct to capture parsed pass pipeline names.
@@ -172,8 +178,9 @@ public:
};
explicit PassBuilder(TargetMachine *TM = nullptr,
- Optional<PGOOptions> PGOOpt = None)
- : TM(TM), PGOOpt(PGOOpt) {}
+ Optional<PGOOptions> PGOOpt = None,
+ PassInstrumentationCallbacks *PIC = nullptr)
+ : TM(TM), PGOOpt(PGOOpt), PIC(PIC) {}
/// Cross register the analysis managers through their proxies.
///
@@ -378,8 +385,9 @@ public:
/// If the sequence of passes aren't all the exact same kind of pass, it will
/// be an error. You cannot mix different levels implicitly, you must
/// explicitly form a pass manager in which to nest passes.
- bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
- bool VerifyEachPass = true, bool DebugLogging = false);
+ Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
+ bool VerifyEachPass = true,
+ bool DebugLogging = false);
/// {{@ Parse a textual pass pipeline description into a specific PassManager
///
@@ -388,12 +396,15 @@ public:
/// this is the valid pipeline text:
///
/// function(lpass)
- bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
- bool VerifyEachPass = true, bool DebugLogging = false);
- bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
- bool VerifyEachPass = true, bool DebugLogging = false);
- bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
- bool VerifyEachPass = true, bool DebugLogging = false);
+ Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
+ bool VerifyEachPass = true,
+ bool DebugLogging = false);
+ Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
+ bool VerifyEachPass = true,
+ bool DebugLogging = false);
+ Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
+ bool VerifyEachPass = true,
+ bool DebugLogging = false);
/// @}}
/// Parse a textual alias analysis pipeline into the provided AA manager.
@@ -411,7 +422,7 @@ public:
/// Returns false if the text cannot be parsed cleanly. The specific state of
/// the \p AA manager is unspecified if such an error is encountered and this
/// returns false.
- bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
+ Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
/// Register a callback for a default optimizer pipeline extension
/// point
@@ -490,6 +501,18 @@ public:
PipelineStartEPCallbacks.push_back(C);
}
+ /// Register a callback for a default optimizer pipeline extension point
+ ///
+ /// This extension point allows adding optimizations at the very end of the
+ /// function optimization pipeline. A key difference between this and the
+ /// legacy PassManager's OptimizerLast callback is that this extension point
+ /// is not triggered at O0. Extensions to the O0 pipeline should append their
+ /// passes to the end of the overall pipeline.
+ void registerOptimizerLastEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ OptimizerLastEPCallbacks.push_back(C);
+ }
+
/// Register a callback for parsing an AliasAnalysis Name to populate
/// the given AAManager \p AA
void registerParseAACallback(
@@ -559,33 +582,34 @@ private:
static Optional<std::vector<PipelineElement>>
parsePipelineText(StringRef Text);
- bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
+ Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
+ Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
bool VerifyEachPass, bool DebugLogging);
- bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
+ Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
+ Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
bool VerifyEachPass, bool DebugLogging);
- bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
- bool VerifyEachPass, bool DebugLogging);
- bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
- bool VerifyEachPass, bool DebugLogging);
bool parseAAPassName(AAManager &AA, StringRef Name);
- bool parseLoopPassPipeline(LoopPassManager &LPM,
- ArrayRef<PipelineElement> Pipeline,
- bool VerifyEachPass, bool DebugLogging);
- bool parseFunctionPassPipeline(FunctionPassManager &FPM,
- ArrayRef<PipelineElement> Pipeline,
- bool VerifyEachPass, bool DebugLogging);
- bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
+ Error parseLoopPassPipeline(LoopPassManager &LPM,
ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
- bool parseModulePassPipeline(ModulePassManager &MPM,
+ Error parseFunctionPassPipeline(FunctionPassManager &FPM,
+ ArrayRef<PipelineElement> Pipeline,
+ bool VerifyEachPass, bool DebugLogging);
+ Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
+ Error parseModulePassPipeline(ModulePassManager &MPM,
+ ArrayRef<PipelineElement> Pipeline,
+ bool VerifyEachPass, bool DebugLogging);
void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
OptimizationLevel Level, bool RunProfileGen,
std::string ProfileGenFile,
- std::string ProfileUseFile);
+ std::string ProfileUseFile,
+ std::string ProfileRemappingFile);
void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
@@ -602,6 +626,8 @@ private:
CGSCCOptimizerLateEPCallbacks;
SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
VectorizerStartEPCallbacks;
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ OptimizerLastEPCallbacks;
// Module callbacks
SmallVector<std::function<void(ModulePassManager &)>, 2>
PipelineStartEPCallbacks;
diff --git a/contrib/llvm/include/llvm/Passes/StandardInstrumentations.h b/contrib/llvm/include/llvm/Passes/StandardInstrumentations.h
new file mode 100644
index 000000000000..8c6f5e1e22f7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Passes/StandardInstrumentations.h
@@ -0,0 +1,70 @@
+//===- StandardInstrumentations.h ------------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header defines a class that provides bookkeeping for all standard
+/// (i.e in-tree) pass instrumentations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSES_STANDARDINSTRUMENTATIONS_H
+#define LLVM_PASSES_STANDARDINSTRUMENTATIONS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/IR/PassTimingInfo.h"
+
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class Module;
+
+/// Instrumentation to print IR before/after passes.
+///
+/// Needs state to be able to print module after pass that invalidates IR unit
+/// (typically Loop or SCC).
+class PrintIRInstrumentation {
+public:
+ PrintIRInstrumentation() = default;
+ ~PrintIRInstrumentation();
+
+ void registerCallbacks(PassInstrumentationCallbacks &PIC);
+
+private:
+ bool printBeforePass(StringRef PassID, Any IR);
+ void printAfterPass(StringRef PassID, Any IR);
+ void printAfterPassInvalidated(StringRef PassID);
+
+ using PrintModuleDesc = std::tuple<const Module *, std::string, StringRef>;
+
+ void pushModuleDesc(StringRef PassID, Any IR);
+ PrintModuleDesc popModuleDesc(StringRef PassID);
+
+ /// Stack of Module description, enough to print the module after a given
+ /// pass.
+ SmallVector<PrintModuleDesc, 2> ModuleDescStack;
+ bool StoreModuleDesc = false;
+};
+
+/// This class provides an interface to register all the standard pass
+/// instrumentations and manages their state (if any).
+class StandardInstrumentations {
+ PrintIRInstrumentation PrintIR;
+ TimePassesHandler TimePasses;
+
+public:
+ StandardInstrumentations() = default;
+
+ void registerCallbacks(PassInstrumentationCallbacks &PIC);
+};
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/contrib/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
index e820f71cb6d5..beaa36553287 100644
--- a/contrib/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
+++ b/contrib/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -510,7 +510,6 @@ class CoverageMapping {
DenseMap<size_t, DenseSet<size_t>> RecordProvenance;
std::vector<FunctionRecord> Functions;
std::vector<std::pair<std::string, uint64_t>> FuncHashMismatches;
- std::vector<std::pair<std::string, uint64_t>> FuncCounterMismatches;
CoverageMapping() = default;
@@ -537,9 +536,7 @@ public:
///
/// This is a count of functions whose profile is out of date or otherwise
/// can't be associated with any coverage information.
- unsigned getMismatchedCount() const {
- return FuncHashMismatches.size() + FuncCounterMismatches.size();
- }
+ unsigned getMismatchedCount() const { return FuncHashMismatches.size(); }
/// A hash mismatch occurs when a profile record for a symbol does not have
/// the same hash as a coverage mapping record for the same symbol. This
@@ -549,14 +546,6 @@ public:
return FuncHashMismatches;
}
- /// A counter mismatch occurs when there is an error when evaluating the
- /// counter expressions in a coverage mapping record. This returns a list of
- /// counter mismatches, where each mismatch is a pair of the symbol name and
- /// the number of valid evaluated counter expressions.
- ArrayRef<std::pair<std::string, uint64_t>> getCounterMismatches() const {
- return FuncCounterMismatches;
- }
-
/// Returns a lexicographically sorted, unique list of files that are
/// covered.
std::vector<StringRef> getUniqueSourceFiles() const;
diff --git a/contrib/llvm/include/llvm/ProfileData/GCOV.h b/contrib/llvm/include/llvm/ProfileData/GCOV.h
index 8500401e44ad..a088f63a6915 100644
--- a/contrib/llvm/include/llvm/ProfileData/GCOV.h
+++ b/contrib/llvm/include/llvm/ProfileData/GCOV.h
@@ -24,9 +24,11 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <limits>
#include <memory>
#include <string>
#include <utility>
@@ -266,13 +268,14 @@ struct GCOVEdge {
GCOVBlock &Src;
GCOVBlock &Dst;
uint64_t Count = 0;
+ uint64_t CyclesCount = 0;
};
/// GCOVFunction - Collects function information.
class GCOVFunction {
public:
- using BlockIterator = pointee_iterator<SmallVectorImpl<
- std::unique_ptr<GCOVBlock>>::const_iterator>;
+ using BlockIterator = pointee_iterator<
+ SmallVectorImpl<std::unique_ptr<GCOVBlock>>::const_iterator>;
GCOVFunction(GCOVFile &P) : Parent(P) {}
@@ -322,6 +325,9 @@ class GCOVBlock {
public:
using EdgeIterator = SmallVectorImpl<GCOVEdge *>::const_iterator;
+ using BlockVector = SmallVector<const GCOVBlock *, 4>;
+ using BlockVectorLists = SmallVector<BlockVector, 4>;
+ using Edges = SmallVector<GCOVEdge *, 4>;
GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N) {}
~GCOVBlock();
@@ -365,6 +371,16 @@ public:
void dump() const;
void collectLineCounts(FileInfo &FI);
+ static uint64_t getCycleCount(const Edges &Path);
+ static void unblock(const GCOVBlock *U, BlockVector &Blocked,
+ BlockVectorLists &BlockLists);
+ static bool lookForCircuit(const GCOVBlock *V, const GCOVBlock *Start,
+ Edges &Path, BlockVector &Blocked,
+ BlockVectorLists &BlockLists,
+ const BlockVector &Blocks, uint64_t &Count);
+ static void getCyclesCount(const BlockVector &Blocks, uint64_t &Count);
+ static uint64_t getLineCount(const BlockVector &Blocks);
+
private:
GCOVFunction &Parent;
uint32_t Number;
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProf.h b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
index 206142b3565a..dc45021fc47d 100644
--- a/contrib/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
@@ -544,9 +544,9 @@ Error InstrProfSymtab::create(const NameIterRange &IterRange) {
void InstrProfSymtab::finalizeSymtab() {
if (Sorted)
return;
- llvm::sort(MD5NameMap.begin(), MD5NameMap.end(), less_first());
- llvm::sort(MD5FuncMap.begin(), MD5FuncMap.end(), less_first());
- llvm::sort(AddrToMD5Map.begin(), AddrToMD5Map.end(), less_first());
+ llvm::sort(MD5NameMap, less_first());
+ llvm::sort(MD5FuncMap, less_first());
+ llvm::sort(AddrToMD5Map, less_first());
AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
AddrToMD5Map.end());
Sorted = true;
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h b/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h
index efc22dcd0d9a..08d782276117 100644
--- a/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h
@@ -349,12 +349,17 @@ using OnDiskHashTableImplV3 =
OnDiskIterableChainedHashTable<InstrProfLookupTrait>;
template <typename HashTableImpl>
+class InstrProfReaderItaniumRemapper;
+
+template <typename HashTableImpl>
class InstrProfReaderIndex : public InstrProfReaderIndexBase {
private:
std::unique_ptr<HashTableImpl> HashTable;
typename HashTableImpl::data_iterator RecordIterator;
uint64_t FormatVersion;
+ friend class InstrProfReaderItaniumRemapper<HashTableImpl>;
+
public:
InstrProfReaderIndex(const unsigned char *Buckets,
const unsigned char *const Payload,
@@ -386,13 +391,26 @@ public:
}
};
+/// Name matcher supporting fuzzy matching of symbol names to names in profiles.
+class InstrProfReaderRemapper {
+public:
+ virtual ~InstrProfReaderRemapper() {}
+ virtual Error populateRemappings() { return Error::success(); }
+ virtual Error getRecords(StringRef FuncName,
+ ArrayRef<NamedInstrProfRecord> &Data) = 0;
+};
+
/// Reader for the indexed binary instrprof format.
class IndexedInstrProfReader : public InstrProfReader {
private:
/// The profile data file contents.
std::unique_ptr<MemoryBuffer> DataBuffer;
+ /// The profile remapping file contents.
+ std::unique_ptr<MemoryBuffer> RemappingBuffer;
/// The index into the profile data.
std::unique_ptr<InstrProfReaderIndexBase> Index;
+ /// The profile remapping file contents.
+ std::unique_ptr<InstrProfReaderRemapper> Remapper;
/// Profile summary data.
std::unique_ptr<ProfileSummary> Summary;
// Index to the current record in the record array.
@@ -404,8 +422,11 @@ private:
const unsigned char *Cur);
public:
- IndexedInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
- : DataBuffer(std::move(DataBuffer)), RecordIndex(0) {}
+ IndexedInstrProfReader(
+ std::unique_ptr<MemoryBuffer> DataBuffer,
+ std::unique_ptr<MemoryBuffer> RemappingBuffer = nullptr)
+ : DataBuffer(std::move(DataBuffer)),
+ RemappingBuffer(std::move(RemappingBuffer)), RecordIndex(0) {}
IndexedInstrProfReader(const IndexedInstrProfReader &) = delete;
IndexedInstrProfReader &operator=(const IndexedInstrProfReader &) = delete;
@@ -434,10 +455,11 @@ public:
/// Factory method to create an indexed reader.
static Expected<std::unique_ptr<IndexedInstrProfReader>>
- create(const Twine &Path);
+ create(const Twine &Path, const Twine &RemappingPath = "");
static Expected<std::unique_ptr<IndexedInstrProfReader>>
- create(std::unique_ptr<MemoryBuffer> Buffer);
+ create(std::unique_ptr<MemoryBuffer> Buffer,
+ std::unique_ptr<MemoryBuffer> RemappingBuffer = nullptr);
// Used for testing purpose only.
void setValueProfDataEndianness(support::endianness Endianness) {
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProf.h b/contrib/llvm/include/llvm/ProfileData/SampleProf.h
index 0cd6dd2c2c0e..927dfd246878 100644
--- a/contrib/llvm/include/llvm/ProfileData/SampleProf.h
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProf.h
@@ -49,7 +49,8 @@ enum class sampleprof_error {
unsupported_writing_format,
truncated_name_table,
not_implemented,
- counter_overflow
+ counter_overflow,
+ ostream_seek_unsupported
};
inline std::error_code make_error_code(sampleprof_error E) {
@@ -293,6 +294,9 @@ public:
/// with the maximum total sample count.
const FunctionSamples *findFunctionSamplesAt(const LineLocation &Loc,
StringRef CalleeName) const {
+ std::string CalleeGUID;
+ CalleeName = getRepInFormat(CalleeName, Format, CalleeGUID);
+
auto iter = CallsiteSamples.find(Loc);
if (iter == CallsiteSamples.end())
return nullptr;
@@ -377,30 +381,53 @@ public:
/// GUID to \p S. Also traverse the BodySamples to add hot CallTarget's GUID
/// to \p S.
void findInlinedFunctions(DenseSet<GlobalValue::GUID> &S, const Module *M,
- uint64_t Threshold, bool isCompact) const {
+ uint64_t Threshold) const {
if (TotalSamples <= Threshold)
return;
- S.insert(Function::getGUID(Name));
+ S.insert(getGUID(Name));
// Import hot CallTargets, which may not be available in IR because full
// profile annotation cannot be done until backend compilation in ThinLTO.
for (const auto &BS : BodySamples)
for (const auto &TS : BS.second.getCallTargets())
if (TS.getValue() > Threshold) {
- Function *Callee = M->getFunction(TS.getKey());
+ const Function *Callee =
+ M->getFunction(getNameInModule(TS.getKey(), M));
if (!Callee || !Callee->getSubprogram())
- S.insert(isCompact ? std::stol(TS.getKey().data())
- : Function::getGUID(TS.getKey()));
+ S.insert(getGUID(TS.getKey()));
}
for (const auto &CS : CallsiteSamples)
for (const auto &NameFS : CS.second)
- NameFS.second.findInlinedFunctions(S, M, Threshold, isCompact);
+ NameFS.second.findInlinedFunctions(S, M, Threshold);
}
/// Set the name of the function.
void setName(StringRef FunctionName) { Name = FunctionName; }
/// Return the function name.
- const StringRef &getName() const { return Name; }
+ StringRef getName() const { return Name; }
+
+ /// Return the original function name if it exists in Module \p M.
+ StringRef getFuncNameInModule(const Module *M) const {
+ return getNameInModule(Name, M);
+ }
+
+ /// Translate \p Name into its original name in Module.
+ /// When the Format is not SPF_Compact_Binary, \p Name needs no translation.
+ /// When the Format is SPF_Compact_Binary, \p Name in current FunctionSamples
+ /// is actually GUID of the original function name. getNameInModule will
+ /// translate \p Name in current FunctionSamples into its original name.
+ /// If the original name doesn't exist in \p M, return empty StringRef.
+ StringRef getNameInModule(StringRef Name, const Module *M) const {
+ if (Format != SPF_Compact_Binary)
+ return Name;
+ // Expect CurrentModule to be initialized by GUIDToFuncNameMapper.
+ if (M != CurrentModule)
+ llvm_unreachable("Input Module should be the same as CurrentModule");
+ auto iter = GUIDToFuncNameMap.find(std::stoull(Name.data()));
+ if (iter == GUIDToFuncNameMap.end())
+ return StringRef();
+ return iter->second;
+ }
/// Returns the line offset to the start line of the subprogram.
/// We assume that a single function will not exceed 65535 LOC.
@@ -417,6 +444,54 @@ public:
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *findFunctionSamples(const DILocation *DIL) const;
+ static SampleProfileFormat Format;
+ /// GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
+ /// all the function symbols defined or declared in CurrentModule.
+ static DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;
+ static Module *CurrentModule;
+
+ class GUIDToFuncNameMapper {
+ public:
+ GUIDToFuncNameMapper(Module &M) {
+ if (Format != SPF_Compact_Binary)
+ return;
+
+ for (const auto &F : M) {
+ StringRef OrigName = F.getName();
+ GUIDToFuncNameMap.insert({Function::getGUID(OrigName), OrigName});
+ /// Local to global var promotion used by optimization like thinlto
+ /// will rename the var and add suffix like ".llvm.xxx" to the
+ /// original local name. In sample profile, the suffixes of function
+ /// names are all stripped. Since it is possible that the mapper is
+ /// built in post-thin-link phase and var promotion has been done,
+ /// we need to add the substring of function name without the suffix
+ /// into the GUIDToFuncNameMap.
+ auto pos = OrigName.find('.');
+ if (pos != StringRef::npos) {
+ StringRef NewName = OrigName.substr(0, pos);
+ GUIDToFuncNameMap.insert({Function::getGUID(NewName), NewName});
+ }
+ }
+ CurrentModule = &M;
+ }
+
+ ~GUIDToFuncNameMapper() {
+ if (Format != SPF_Compact_Binary)
+ return;
+
+ GUIDToFuncNameMap.clear();
+ CurrentModule = nullptr;
+ }
+ };
+
+ // Assume the input \p Name is a name coming from FunctionSamples itself.
+ // If the format is SPF_Compact_Binary, the name is already a GUID and we
+ // don't want to return the GUID of GUID.
+ static uint64_t getGUID(StringRef Name) {
+ return (Format == SPF_Compact_Binary) ? std::stoull(Name.data())
+ : Function::getGUID(Name);
+ }
+
private:
/// Mangled name of the function.
StringRef Name;
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h b/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h
index 0617b05e8d4f..5cc729e42cc8 100644
--- a/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -222,6 +222,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SymbolRemappingReader.h"
#include <algorithm>
#include <cstdint>
#include <memory>
@@ -279,6 +280,8 @@ public:
/// Print the profile for \p FName on stream \p OS.
void dumpFunctionProfile(StringRef FName, raw_ostream &OS = dbgs());
+ virtual void collectFuncsToUse(const Module &M) {}
+
/// Print all the profiles on stream \p OS.
void dump(raw_ostream &OS = dbgs());
@@ -287,11 +290,16 @@ public:
// The function name may have been updated by adding suffix. In sample
// profile, the function names are all stripped, so we need to strip
// the function name suffix before matching with profile.
- StringRef Fname = F.getName().split('.').first;
+ return getSamplesFor(F.getName().split('.').first);
+ }
+
+ /// Return the samples collected for function \p F.
+ virtual FunctionSamples *getSamplesFor(StringRef Fname) {
std::string FGUID;
Fname = getRepInFormat(Fname, getFormat(), FGUID);
- if (Profiles.count(Fname))
- return &Profiles[Fname];
+ auto It = Profiles.find(Fname);
+ if (It != Profiles.end())
+ return &It->second;
return nullptr;
}
@@ -335,6 +343,12 @@ protected:
/// Profile summary information.
std::unique_ptr<ProfileSummary> Summary;
+ /// Take ownership of the summary of this reader.
+ static std::unique_ptr<ProfileSummary>
+ takeSummary(SampleProfileReader &Reader) {
+ return std::move(Reader.Summary);
+ }
+
/// Compute summary for this profile.
void computeSummary();
@@ -364,7 +378,7 @@ public:
: SampleProfileReader(std::move(B), C, Format) {}
/// Read and validate the file header.
- std::error_code readHeader() override;
+ virtual std::error_code readHeader() override;
/// Read sample profiles from the associated file.
std::error_code read() override;
@@ -378,6 +392,10 @@ protected:
/// \returns the read value.
template <typename T> ErrorOr<T> readNumber();
+ /// Read a numeric value of type T from the profile. The value is saved
+ /// without encoded.
+ template <typename T> ErrorOr<T> readUnencodedNumber();
+
/// Read a string from the profile.
///
/// If an error occurs during decoding, a diagnostic message is emitted and
@@ -392,6 +410,9 @@ protected:
/// Return true if we've reached the end of file.
bool at_eof() const { return Data >= End; }
+ /// Read the next function profile instance.
+ std::error_code readFuncProfile();
+
/// Read the contents of the given profile instance.
std::error_code readProfile(FunctionSamples &FProfile);
@@ -436,10 +457,17 @@ class SampleProfileReaderCompactBinary : public SampleProfileReaderBinary {
private:
/// Function name table.
std::vector<std::string> NameTable;
+ /// The table mapping from function name to the offset of its FunctionSample
+ /// towards file start.
+ DenseMap<StringRef, uint64_t> FuncOffsetTable;
+ /// The set containing the functions to use when compiling a module.
+ DenseSet<StringRef> FuncsToUse;
virtual std::error_code verifySPMagic(uint64_t Magic) override;
virtual std::error_code readNameTable() override;
/// Read a string indirectly via the name table.
virtual ErrorOr<StringRef> readStringFromTable() override;
+ virtual std::error_code readHeader() override;
+ std::error_code readFuncOffsetTable();
public:
SampleProfileReaderCompactBinary(std::unique_ptr<MemoryBuffer> B,
@@ -448,6 +476,12 @@ public:
/// \brief Return true if \p Buffer is in the format supported by this class.
static bool hasFormat(const MemoryBuffer &Buffer);
+
+ /// Read samples only for functions to use.
+ std::error_code read() override;
+
+ /// Collect functions to be used when compiling Module \p M.
+ void collectFuncsToUse(const Module &M) override;
};
using InlineCallStack = SmallVector<FunctionSamples *, 10>;
@@ -503,6 +537,44 @@ protected:
static const uint32_t GCOVTagAFDOFunction = 0xac000000;
};
+/// A profile data reader proxy that remaps the profile data from another
+/// sample profile data reader, by applying a provided set of equivalences
+/// between components of the symbol names in the profile.
+class SampleProfileReaderItaniumRemapper : public SampleProfileReader {
+public:
+ SampleProfileReaderItaniumRemapper(
+ std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
+ std::unique_ptr<SampleProfileReader> Underlying)
+ : SampleProfileReader(std::move(B), C, Underlying->getFormat()) {
+ Profiles = std::move(Underlying->getProfiles());
+ Summary = takeSummary(*Underlying);
+ // Keep the underlying reader alive; the profile data may contain
+ // StringRefs referencing names in its name table.
+ UnderlyingReader = std::move(Underlying);
+ }
+
+ /// Create a remapped sample profile from the given remapping file and
+ /// underlying samples.
+ static ErrorOr<std::unique_ptr<SampleProfileReader>>
+ create(const Twine &Filename, LLVMContext &C,
+ std::unique_ptr<SampleProfileReader> Underlying);
+
+ /// Read and validate the file header.
+ std::error_code readHeader() override { return sampleprof_error::success; }
+
+ /// Read remapping file and apply it to the sample profile.
+ std::error_code read() override;
+
+ /// Return the samples collected for function \p F.
+ FunctionSamples *getSamplesFor(StringRef FunctionName) override;
+ using SampleProfileReader::getSamplesFor;
+
+private:
+ SymbolRemappingReader Remappings;
+ DenseMap<SymbolRemappingReader::Key, FunctionSamples*> SampleMap;
+ std::unique_ptr<SampleProfileReader> UnderlyingReader;
+};
+
} // end namespace sampleprof
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h b/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h
index 74dc839ff049..d5ac6e53e4f7 100644
--- a/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h
@@ -42,7 +42,7 @@ public:
/// Write all the sample profiles in the given map of samples.
///
/// \returns status code of the file update operation.
- std::error_code write(const StringMap<FunctionSamples> &ProfileMap);
+ virtual std::error_code write(const StringMap<FunctionSamples> &ProfileMap);
raw_ostream &getOutputStream() { return *OutputStream; }
@@ -103,14 +103,15 @@ private:
/// Sample-based profile writer (binary format).
class SampleProfileWriterBinary : public SampleProfileWriter {
public:
- std::error_code write(const FunctionSamples &S) override;
+ virtual std::error_code write(const FunctionSamples &S) override;
SampleProfileWriterBinary(std::unique_ptr<raw_ostream> &OS)
: SampleProfileWriter(OS) {}
protected:
virtual std::error_code writeNameTable() = 0;
virtual std::error_code writeMagicIdent() = 0;
- std::error_code writeHeader(const StringMap<FunctionSamples> &ProfileMap) override;
+ virtual std::error_code
+ writeHeader(const StringMap<FunctionSamples> &ProfileMap) override;
std::error_code writeSummary();
std::error_code writeNameIdx(StringRef FName);
std::error_code writeBody(const FunctionSamples &S);
@@ -135,12 +136,56 @@ protected:
virtual std::error_code writeMagicIdent() override;
};
+// CompactBinary is a compact format of binary profile which both reduces
+// the profile size and the load time needed when compiling. It has two
+// major difference with Binary format.
+// 1. It represents all the strings in name table using md5 hash.
+// 2. It saves a function offset table which maps function name index to
+// the offset of its function profile to the start of the binary profile,
+// so by using the function offset table, for those function profiles which
+// will not be needed when compiling a module, the profile reader does't
+// have to read them and it saves compile time if the profile size is huge.
+// The layout of the compact format is shown as follows:
+//
+// Part1: Profile header, the same as binary format, containing magic
+// number, version, summary, name table...
+// Part2: Function Offset Table Offset, which saves the position of
+// Part4.
+// Part3: Function profile collection
+// function1 profile start
+// ....
+// function2 profile start
+// ....
+// function3 profile start
+// ....
+// ......
+// Part4: Function Offset Table
+// function1 name index --> function1 profile start
+// function2 name index --> function2 profile start
+// function3 name index --> function3 profile start
+//
+// We need Part2 because profile reader can use it to find out and read
+// function offset table without reading Part3 first.
class SampleProfileWriterCompactBinary : public SampleProfileWriterBinary {
using SampleProfileWriterBinary::SampleProfileWriterBinary;
+public:
+ virtual std::error_code write(const FunctionSamples &S) override;
+ virtual std::error_code
+ write(const StringMap<FunctionSamples> &ProfileMap) override;
+
protected:
+ /// The table mapping from function name to the offset of its FunctionSample
+ /// towards profile start.
+ MapVector<StringRef, uint64_t> FuncOffsetTable;
+ /// The offset of the slot to be filled with the offset of FuncOffsetTable
+ /// towards profile start.
+ uint64_t TableOffset;
virtual std::error_code writeNameTable() override;
virtual std::error_code writeMagicIdent() override;
+ virtual std::error_code
+ writeHeader(const StringMap<FunctionSamples> &ProfileMap) override;
+ std::error_code writeFuncOffsetTable();
};
} // end namespace sampleprof
diff --git a/contrib/llvm/include/llvm/Support/AArch64TargetParser.def b/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
index 6772e5f9b734..e03297b7c3c3 100644
--- a/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
+++ b/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
@@ -40,6 +40,11 @@ AARCH64_ARCH("armv8.4-a", ARMV8_4A, "8.4-A", "v8.4a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
+AARCH64_ARCH("armv8.5-a", ARMV8_5A, "8.5-A", "v8.5a",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
+ AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
#undef AARCH64_ARCH
#ifndef AARCH64_ARCH_EXT_NAME
@@ -60,10 +65,16 @@ AARCH64_ARCH_EXT_NAME("dotprod", AArch64::AEK_DOTPROD, "+dotprod","-dotprod")
AARCH64_ARCH_EXT_NAME("fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8")
AARCH64_ARCH_EXT_NAME("simd", AArch64::AEK_SIMD, "+neon", "-neon")
AARCH64_ARCH_EXT_NAME("fp16", AArch64::AEK_FP16, "+fullfp16", "-fullfp16")
+AARCH64_ARCH_EXT_NAME("fp16fml", AArch64::AEK_FP16FML, "+fp16fml", "-fp16fml")
AARCH64_ARCH_EXT_NAME("profile", AArch64::AEK_PROFILE, "+spe", "-spe")
AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
AARCH64_ARCH_EXT_NAME("sve", AArch64::AEK_SVE, "+sve", "-sve")
AARCH64_ARCH_EXT_NAME("rcpc", AArch64::AEK_RCPC, "+rcpc", "-rcpc")
+AARCH64_ARCH_EXT_NAME("rng", AArch64::AEK_RAND, "+rand", "-rand")
+AARCH64_ARCH_EXT_NAME("memtag", AArch64::AEK_MTE, "+mte", "-mte")
+AARCH64_ARCH_EXT_NAME("ssbs", AArch64::AEK_SSBS, "+ssbs", "-ssbs")
+AARCH64_ARCH_EXT_NAME("sb", AArch64::AEK_SB, "+sb", "-sb")
+AARCH64_ARCH_EXT_NAME("predres", AArch64::AEK_PREDRES, "+predres", "-predres")
#undef AARCH64_ARCH_EXT_NAME
#ifndef AARCH64_CPU_NAME
@@ -91,8 +102,8 @@ AARCH64_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC))
-AARCH64_CPU_NAME("exynos-m4", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_CRC))
+AARCH64_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD))
AARCH64_CPU_NAME("falkor", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AArch64::AEK_RDM))
AARCH64_CPU_NAME("saphira", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
@@ -109,6 +120,9 @@ AARCH64_CPU_NAME("thunderxt81", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt83", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
+AARCH64_CPU_NAME("tsv110", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (AArch64::AEK_PROFILE | AArch64::AEK_FP16 | AArch64::AEK_FP16FML |
+ AArch64::AEK_DOTPROD))
// Invalid CPU
AARCH64_CPU_NAME("invalid", INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
#undef AARCH64_CPU_NAME
diff --git a/contrib/llvm/include/llvm/Support/AArch64TargetParser.h b/contrib/llvm/include/llvm/Support/AArch64TargetParser.h
new file mode 100644
index 000000000000..76b77d474428
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/AArch64TargetParser.h
@@ -0,0 +1,124 @@
+//===-- AArch64TargetParser - Parser for AArch64 features -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a target parser to recognise AArch64 hardware features
+// such as FPU/CPU/ARCH and extension names.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_AARCH64TARGETPARSERCOMMON_H
+#define LLVM_SUPPORT_AARCH64TARGETPARSERCOMMON_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ARMTargetParser.h"
+#include <vector>
+
+// FIXME:This should be made into class design,to avoid dupplication.
+namespace llvm {
+namespace AArch64 {
+
+// Arch extension modifiers for CPUs.
+enum ArchExtKind : unsigned {
+ AEK_INVALID = 0,
+ AEK_NONE = 1,
+ AEK_CRC = 1 << 1,
+ AEK_CRYPTO = 1 << 2,
+ AEK_FP = 1 << 3,
+ AEK_SIMD = 1 << 4,
+ AEK_FP16 = 1 << 5,
+ AEK_PROFILE = 1 << 6,
+ AEK_RAS = 1 << 7,
+ AEK_LSE = 1 << 8,
+ AEK_SVE = 1 << 9,
+ AEK_DOTPROD = 1 << 10,
+ AEK_RCPC = 1 << 11,
+ AEK_RDM = 1 << 12,
+ AEK_SM4 = 1 << 13,
+ AEK_SHA3 = 1 << 14,
+ AEK_SHA2 = 1 << 15,
+ AEK_AES = 1 << 16,
+ AEK_FP16FML = 1 << 17,
+ AEK_RAND = 1 << 18,
+ AEK_MTE = 1 << 19,
+ AEK_SSBS = 1 << 20,
+ AEK_SB = 1 << 21,
+ AEK_PREDRES = 1 << 22,
+};
+
+enum class ArchKind {
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "AArch64TargetParser.def"
+};
+
+const ARM::ArchNames<ArchKind> AArch64ARCHNames[] = {
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, \
+ ARCH_BASE_EXT) \
+ {NAME, \
+ sizeof(NAME) - 1, \
+ CPU_ATTR, \
+ sizeof(CPU_ATTR) - 1, \
+ SUB_ARCH, \
+ sizeof(SUB_ARCH) - 1, \
+ ARM::FPUKind::ARCH_FPU, \
+ ARCH_BASE_EXT, \
+ AArch64::ArchKind::ID, \
+ ARCH_ATTR},
+#include "AArch64TargetParser.def"
+};
+
+const ARM::ExtName AArch64ARCHExtNames[] = {
+#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE) \
+ {NAME, sizeof(NAME) - 1, ID, FEATURE, NEGFEATURE},
+#include "AArch64TargetParser.def"
+};
+
+const ARM::CpuNames<ArchKind> AArch64CPUNames[] = {
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
+ {NAME, sizeof(NAME) - 1, AArch64::ArchKind::ID, IS_DEFAULT, DEFAULT_EXT},
+#include "AArch64TargetParser.def"
+};
+
+const ArchKind ArchKinds[] = {
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) \
+ ArchKind::ID,
+#include "AArch64TargetParser.def"
+};
+
+// FIXME: These should be moved to TargetTuple once it exists
+bool getExtensionFeatures(unsigned Extensions,
+ std::vector<StringRef> &Features);
+bool getArchFeatures(ArchKind AK, std::vector<StringRef> &Features);
+
+StringRef getArchName(ArchKind AK);
+unsigned getArchAttr(ArchKind AK);
+StringRef getCPUAttr(ArchKind AK);
+StringRef getSubArch(ArchKind AK);
+StringRef getArchExtName(unsigned ArchExtKind);
+StringRef getArchExtFeature(StringRef ArchExt);
+
+// Information by Name
+unsigned getDefaultFPU(StringRef CPU, ArchKind AK);
+unsigned getDefaultExtensions(StringRef CPU, ArchKind AK);
+StringRef getDefaultCPU(StringRef Arch);
+ArchKind getCPUArchKind(StringRef CPU);
+
+// Parser
+ArchKind parseArch(StringRef Arch);
+ArchExtKind parseArchExt(StringRef ArchExt);
+ArchKind parseCPUArch(StringRef CPU);
+// Used by target parser tests
+void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
+
+bool isX18ReservedByDefault(const Triple &TT);
+
+} // namespace AArch64
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/AMDGPUMetadata.h b/contrib/llvm/include/llvm/Support/AMDGPUMetadata.h
index 667fb3f3da43..84851c07499d 100644
--- a/contrib/llvm/include/llvm/Support/AMDGPUMetadata.h
+++ b/contrib/llvm/include/llvm/Support/AMDGPUMetadata.h
@@ -431,6 +431,21 @@ std::error_code fromString(std::string String, Metadata &HSAMetadata);
/// Converts \p HSAMetadata to \p String.
std::error_code toString(Metadata HSAMetadata, std::string &String);
+//===----------------------------------------------------------------------===//
+// HSA metadata for v3 code object.
+//===----------------------------------------------------------------------===//
+namespace V3 {
+/// HSA metadata major version.
+constexpr uint32_t VersionMajor = 1;
+/// HSA metadata minor version.
+constexpr uint32_t VersionMinor = 0;
+
+/// HSA metadata beginning assembler directive.
+constexpr char AssemblerDirectiveBegin[] = ".amdgpu_metadata";
+/// HSA metadata ending assembler directive.
+constexpr char AssemblerDirectiveEnd[] = ".end_amdgpu_metadata";
+} // end namespace V3
+
} // end namespace HSAMD
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/Support/ARMTargetParser.def b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
index 78f5410fb733..9e844e2b464d 100644
--- a/contrib/llvm/include/llvm/Support/ARMTargetParser.def
+++ b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
@@ -106,6 +106,11 @@ ARM_ARCH("armv8.4-a", ARMV8_4A, "8.4-A", "v8.4a",
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
ARM::AEK_DOTPROD))
+ARM_ARCH("armv8.5-a", ARMV8_5A, "8.5-A", "v8.5a",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
+ ARM::AEK_DOTPROD))
ARM_ARCH("armv8-r", ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
FK_NEON_FP_ARMV8,
(ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
@@ -152,6 +157,8 @@ ARM_ARCH_EXT_NAME("iwmmxt", ARM::AEK_IWMMXT, nullptr, nullptr)
ARM_ARCH_EXT_NAME("iwmmxt2", ARM::AEK_IWMMXT2, nullptr, nullptr)
ARM_ARCH_EXT_NAME("maverick", ARM::AEK_MAVERICK, nullptr, nullptr)
ARM_ARCH_EXT_NAME("xscale", ARM::AEK_XSCALE, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("fp16fml", ARM::AEK_FP16FML, "+fp16fml", "-fp16fml")
+ARM_ARCH_EXT_NAME("sb", ARM::AEK_SB, "+sb", "-sb")
#undef ARM_ARCH_EXT_NAME
#ifndef ARM_HW_DIV_NAME
@@ -202,10 +209,9 @@ ARM_CPU_NAME("arm926ej-s", ARMV5TEJ, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136j-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136jf-s", ARMV6, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136jz-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
-ARM_CPU_NAME("arm1176j-s", ARMV6K, FK_NONE, true, ARM::AEK_NONE)
-ARM_CPU_NAME("arm1176jz-s", ARMV6KZ, FK_NONE, false, ARM::AEK_NONE)
-ARM_CPU_NAME("mpcore", ARMV6K, FK_VFPV2, false, ARM::AEK_NONE)
+ARM_CPU_NAME("mpcore", ARMV6K, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("mpcorenovfp", ARMV6K, FK_NONE, false, ARM::AEK_NONE)
+ARM_CPU_NAME("arm1176jz-s", ARMV6KZ, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1176jzf-s", ARMV6KZ, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2-s", ARMV6T2, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2f-s", ARMV6T2, FK_VFPV2, false, ARM::AEK_NONE)
@@ -260,7 +266,8 @@ ARM_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m1", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m2", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
-ARM_CPU_NAME("exynos-m4", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
// Non-standard Arch names.
ARM_CPU_NAME("iwmmxt", IWMMXT, FK_NONE, true, ARM::AEK_NONE)
diff --git a/contrib/llvm/include/llvm/Support/ARMTargetParser.h b/contrib/llvm/include/llvm/Support/ARMTargetParser.h
new file mode 100644
index 000000000000..71acc0dc72d0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ARMTargetParser.h
@@ -0,0 +1,264 @@
+//===-- ARMTargetParser - Parser for ARM target features --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a target parser to recognise ARM hardware features
+// such as FPU/CPU/ARCH/extensions and specific support such as HWDIV.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMTARGETPARSER_H
+#define LLVM_SUPPORT_ARMTARGETPARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include <vector>
+
+namespace llvm {
+namespace ARM {
+
+// Arch extension modifiers for CPUs.
+// Note that this is not the same as the AArch64 list
+enum ArchExtKind : unsigned {
+ AEK_INVALID = 0,
+ AEK_NONE = 1,
+ AEK_CRC = 1 << 1,
+ AEK_CRYPTO = 1 << 2,
+ AEK_FP = 1 << 3,
+ AEK_HWDIVTHUMB = 1 << 4,
+ AEK_HWDIVARM = 1 << 5,
+ AEK_MP = 1 << 6,
+ AEK_SIMD = 1 << 7,
+ AEK_SEC = 1 << 8,
+ AEK_VIRT = 1 << 9,
+ AEK_DSP = 1 << 10,
+ AEK_FP16 = 1 << 11,
+ AEK_RAS = 1 << 12,
+ AEK_SVE = 1 << 13,
+ AEK_DOTPROD = 1 << 14,
+ AEK_SHA2 = 1 << 15,
+ AEK_AES = 1 << 16,
+ AEK_FP16FML = 1 << 17,
+ AEK_SB = 1 << 18,
+ // Unsupported extensions.
+ AEK_OS = 0x8000000,
+ AEK_IWMMXT = 0x10000000,
+ AEK_IWMMXT2 = 0x20000000,
+ AEK_MAVERICK = 0x40000000,
+ AEK_XSCALE = 0x80000000,
+};
+
+// List of Arch Extension names.
+// FIXME: TableGen this.
+struct ExtName {
+ const char *NameCStr;
+ size_t NameLength;
+ unsigned ID;
+ const char *Feature;
+ const char *NegFeature;
+
+ StringRef getName() const { return StringRef(NameCStr, NameLength); }
+};
+
+const ExtName ARCHExtNames[] = {
+#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE) \
+ {NAME, sizeof(NAME) - 1, ID, FEATURE, NEGFEATURE},
+#include "ARMTargetParser.def"
+};
+
+// List of HWDiv names (use getHWDivSynonym) and which architectural
+// features they correspond to (use getHWDivFeatures).
+// FIXME: TableGen this.
+const struct {
+ const char *NameCStr;
+ size_t NameLength;
+ unsigned ID;
+
+ StringRef getName() const { return StringRef(NameCStr, NameLength); }
+} HWDivNames[] = {
+#define ARM_HW_DIV_NAME(NAME, ID) {NAME, sizeof(NAME) - 1, ID},
+#include "ARMTargetParser.def"
+};
+
+// Arch names.
+enum class ArchKind {
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "ARMTargetParser.def"
+};
+
+// List of CPU names and their arches.
+// The same CPU can have multiple arches and can be default on multiple arches.
+// When finding the Arch for a CPU, first-found prevails. Sort them accordingly.
+// When this becomes table-generated, we'd probably need two tables.
+// FIXME: TableGen this.
+template <typename T> struct CpuNames {
+ const char *NameCStr;
+ size_t NameLength;
+ T ArchID;
+ bool Default; // is $Name the default CPU for $ArchID ?
+ unsigned DefaultExtensions;
+
+ StringRef getName() const { return StringRef(NameCStr, NameLength); }
+};
+
+const CpuNames<ArchKind> CPUNames[] = {
+#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
+ {NAME, sizeof(NAME) - 1, ARM::ArchKind::ID, IS_DEFAULT, DEFAULT_EXT},
+#include "ARMTargetParser.def"
+};
+
+// FPU names.
+enum FPUKind {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) KIND,
+#include "ARMTargetParser.def"
+ FK_LAST
+};
+
+// FPU Version
+enum class FPUVersion {
+ NONE,
+ VFPV2,
+ VFPV3,
+ VFPV3_FP16,
+ VFPV4,
+ VFPV5
+};
+
+// An FPU name restricts the FPU in one of three ways:
+enum class FPURestriction {
+ None = 0, ///< No restriction
+ D16, ///< Only 16 D registers
+ SP_D16 ///< Only single-precision instructions, with 16 D registers
+};
+
+// An FPU name implies one of three levels of Neon support:
+enum class NeonSupportLevel {
+ None = 0, ///< No Neon
+ Neon, ///< Neon
+ Crypto ///< Neon with Crypto
+};
+
+// ISA kinds.
+enum class ISAKind { INVALID = 0, ARM, THUMB, AARCH64 };
+
+// Endianness
+// FIXME: BE8 vs. BE32?
+enum class EndianKind { INVALID = 0, LITTLE, BIG };
+
+// v6/v7/v8 Profile
+enum class ProfileKind { INVALID = 0, A, R, M };
+
+// List of canonical FPU names (use getFPUSynonym) and which architectural
+// features they correspond to (use getFPUFeatures).
+// FIXME: TableGen this.
+// The entries must appear in the order listed in ARM::FPUKind for correct
+// indexing
+struct FPUName {
+ const char *NameCStr;
+ size_t NameLength;
+ FPUKind ID;
+ FPUVersion FPUVer;
+ NeonSupportLevel NeonSupport;
+ FPURestriction Restriction;
+
+ StringRef getName() const { return StringRef(NameCStr, NameLength); }
+};
+
+static const FPUName FPUNames[] = {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) \
+ {NAME, sizeof(NAME) - 1, KIND, VERSION, NEON_SUPPORT, RESTRICTION},
+#include "llvm/Support/ARMTargetParser.def"
+};
+
+// List of canonical arch names (use getArchSynonym).
+// This table also provides the build attribute fields for CPU arch
+// and Arch ID, according to the Addenda to the ARM ABI, chapters
+// 2.4 and 2.3.5.2 respectively.
+// FIXME: SubArch values were simplified to fit into the expectations
+// of the triples and are not conforming with their official names.
+// Check to see if the expectation should be changed.
+// FIXME: TableGen this.
+template <typename T> struct ArchNames {
+ const char *NameCStr;
+ size_t NameLength;
+ const char *CPUAttrCStr;
+ size_t CPUAttrLength;
+ const char *SubArchCStr;
+ size_t SubArchLength;
+ unsigned DefaultFPU;
+ unsigned ArchBaseExtensions;
+ T ID;
+ ARMBuildAttrs::CPUArch ArchAttr; // Arch ID in build attributes.
+
+ StringRef getName() const { return StringRef(NameCStr, NameLength); }
+
+ // CPU class in build attributes.
+ StringRef getCPUAttr() const { return StringRef(CPUAttrCStr, CPUAttrLength); }
+
+ // Sub-Arch name.
+ StringRef getSubArch() const { return StringRef(SubArchCStr, SubArchLength); }
+};
+
+static const ArchNames<ArchKind> ARCHNames[] = {
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, \
+ ARCH_BASE_EXT) \
+ {NAME, sizeof(NAME) - 1, \
+ CPU_ATTR, sizeof(CPU_ATTR) - 1, \
+ SUB_ARCH, sizeof(SUB_ARCH) - 1, \
+ ARCH_FPU, ARCH_BASE_EXT, \
+ ArchKind::ID, ARCH_ATTR},
+#include "llvm/Support/ARMTargetParser.def"
+};
+
+// Information by ID
+StringRef getFPUName(unsigned FPUKind);
+FPUVersion getFPUVersion(unsigned FPUKind);
+NeonSupportLevel getFPUNeonSupportLevel(unsigned FPUKind);
+FPURestriction getFPURestriction(unsigned FPUKind);
+
+// FIXME: These should be moved to TargetTuple once it exists
+bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
+bool getHWDivFeatures(unsigned HWDivKind, std::vector<StringRef> &Features);
+bool getExtensionFeatures(unsigned Extensions,
+ std::vector<StringRef> &Features);
+
+StringRef getArchName(ArchKind AK);
+unsigned getArchAttr(ArchKind AK);
+StringRef getCPUAttr(ArchKind AK);
+StringRef getSubArch(ArchKind AK);
+StringRef getArchExtName(unsigned ArchExtKind);
+StringRef getArchExtFeature(StringRef ArchExt);
+StringRef getHWDivName(unsigned HWDivKind);
+
+// Information by Name
+unsigned getDefaultFPU(StringRef CPU, ArchKind AK);
+unsigned getDefaultExtensions(StringRef CPU, ArchKind AK);
+StringRef getDefaultCPU(StringRef Arch);
+StringRef getCanonicalArchName(StringRef Arch);
+StringRef getFPUSynonym(StringRef FPU);
+StringRef getArchSynonym(StringRef Arch);
+
+// Parser
+unsigned parseHWDiv(StringRef HWDiv);
+unsigned parseFPU(StringRef FPU);
+ArchKind parseArch(StringRef Arch);
+unsigned parseArchExt(StringRef ArchExt);
+ArchKind parseCPUArch(StringRef CPU);
+ISAKind parseArchISA(StringRef Arch);
+EndianKind parseArchEndian(StringRef Arch);
+ProfileKind parseArchProfile(StringRef Arch);
+unsigned parseArchVersion(StringRef Arch);
+
+void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
+StringRef computeDefaultTargetABI(const Triple &TT, StringRef CPU);
+
+} // namespace ARM
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ARMWinEH.h b/contrib/llvm/include/llvm/Support/ARMWinEH.h
index 1463629f45dc..60174503ad49 100644
--- a/contrib/llvm/include/llvm/Support/ARMWinEH.h
+++ b/contrib/llvm/include/llvm/Support/ARMWinEH.h
@@ -207,6 +207,8 @@ std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
/// ExceptionDataRecord - An entry in the table of exception data (.xdata)
///
+/// The format on ARM is:
+///
/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +-------+---------+-+-+-+---+-----------------------------------+
@@ -215,6 +217,16 @@ std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
/// | Reserved |Ex. Code Words| (Extended Epilogue Count) |
/// +-------+--------+--------------+-------------------------------+
///
+/// The format on ARM64 is:
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------+---------+-+-+---+-----------------------------------+
+/// | C Wrd | Epi Cnt |E|X|Ver| Function Length |
+/// +---------+------+--'-'-'---'---+-------------------------------+
+/// | Reserved |Ex. Code Words| (Extended Epilogue Count) |
+/// +-------+--------+--------------+-------------------------------+
+///
/// Function Length : 18-bit field indicating the total length of the function
/// in bytes divided by 2. If a function is larger than
/// 512KB, then multiple pdata and xdata records must be used.
@@ -225,7 +237,7 @@ std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
/// header
/// F : 1-bit field indicating that the record describes a function fragment
/// (implies that no prologue is present, and prologue processing should be
-/// skipped)
+/// skipped) (ARM only)
/// Epilogue Count : 5-bit field that differs in meaning based on the E field.
///
/// If E is set, then this field specifies the index of the
@@ -235,33 +247,43 @@ std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
/// scopes. If more than 31 scopes exist, then this field and
/// the Code Words field must both be set to 0 to indicate that
/// an extension word is required.
-/// Code Words : 4-bit field that species the number of 32-bit words needed to
-/// contain all the unwind codes. If more than 15 words (63 code
-/// bytes) are required, then this field and the Epilogue Count
-/// field must both be set to 0 to indicate that an extension word
-/// is required.
+/// Code Words : 4-bit (5-bit on ARM64) field that specifies the number of
+/// 32-bit words needed to contain all the unwind codes. If more
+/// than 15 words (31 words on ARM64) are required, then this field
+/// and the Epilogue Count field must both be set to 0 to indicate
+/// that an extension word is required.
/// Extended Epilogue Count, Extended Code Words :
/// Valid only if Epilog Count and Code Words are both
/// set to 0. Provides an 8-bit extended code word
/// count and 16-bits for epilogue count
///
+/// The epilogue scope format on ARM is:
+///
/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +----------------+------+---+---+-------------------------------+
/// | Ep Start Idx | Cond |Res| Epilogue Start Offset |
/// +----------------+------+---+-----------------------------------+
///
+/// The epilogue scope format on ARM64 is:
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +-------------------+-------+---+-------------------------------+
+/// | Ep Start Idx | Res | Epilogue Start Offset |
+/// +-------------------+-------+-----------------------------------+
+///
/// If the E bit is unset in the header, the header is followed by a series of
/// epilogue scopes, which are sorted by their offset.
///
/// Epilogue Start Offset: 18-bit field encoding the offset of epilogue relative
/// to the start of the function in bytes divided by two
/// Res : 2-bit field reserved for future expansion (must be set to 0)
-/// Condition : 4-bit field providing the condition under which the epilogue is
-/// executed. Unconditional epilogues should set this field to 0xe.
-/// Epilogues must be entirely conditional or unconditional, and in
-/// Thumb-2 mode. The epilogue beings with the first instruction
-/// after the IT opcode.
+/// Condition : (ARM only) 4-bit field providing the condition under which the
+/// epilogue is executed. Unconditional epilogues should set this
+/// field to 0xe. Epilogues must be entirely conditional or
+/// unconditional, and in Thumb-2 mode. The epilogue begins with
+/// the first instruction after the IT opcode.
/// Epilogue Start Index : 8-bit field indicating the byte index of the first
/// unwind code describing the epilogue
///
@@ -293,18 +315,33 @@ struct EpilogueScope {
const support::ulittle32_t ES;
EpilogueScope(const support::ulittle32_t Data) : ES(Data) {}
+ // Same for both ARM and AArch64.
uint32_t EpilogueStartOffset() const {
return (ES & 0x0003ffff);
}
- uint8_t Res() const {
+
+ // Different implementations for ARM and AArch64.
+ uint8_t ResARM() const {
return ((ES & 0x000c0000) >> 18);
}
+
+ uint8_t ResAArch64() const {
+ return ((ES & 0x000f0000) >> 18);
+ }
+
+ // Condition is only applicable to ARM.
uint8_t Condition() const {
return ((ES & 0x00f00000) >> 20);
}
- uint8_t EpilogueStartIndex() const {
+
+ // Different implementations for ARM and AArch64.
+ uint8_t EpilogueStartIndexARM() const {
return ((ES & 0xff000000) >> 24);
}
+
+ uint16_t EpilogueStartIndexAArch64() const {
+ return ((ES & 0xffc00000) >> 22);
+ }
};
struct ExceptionDataRecord;
@@ -312,13 +349,23 @@ inline size_t HeaderWords(const ExceptionDataRecord &XR);
struct ExceptionDataRecord {
const support::ulittle32_t *Data;
+ bool isAArch64;
- ExceptionDataRecord(const support::ulittle32_t *Data) : Data(Data) {}
+ ExceptionDataRecord(const support::ulittle32_t *Data, bool isAArch64) :
+ Data(Data), isAArch64(isAArch64) {}
uint32_t FunctionLength() const {
return (Data[0] & 0x0003ffff);
}
+ uint32_t FunctionLengthInBytesARM() const {
+ return FunctionLength() << 1;
+ }
+
+ uint32_t FunctionLengthInBytesAArch64() const {
+ return FunctionLength() << 2;
+ }
+
uint8_t Vers() const {
return (Data[0] & 0x000C0000) >> 18;
}
@@ -332,18 +379,25 @@ struct ExceptionDataRecord {
}
bool F() const {
+ assert(!isAArch64 && "Fragments are only supported on ARMv7 WinEH");
return ((Data[0] & 0x00400000) >> 22);
}
uint8_t EpilogueCount() const {
- if (HeaderWords(*this) == 1)
+ if (HeaderWords(*this) == 1) {
+ if (isAArch64)
+ return (Data[0] & 0x07C00000) >> 22;
return (Data[0] & 0x0f800000) >> 23;
+ }
return Data[1] & 0x0000ffff;
}
uint8_t CodeWords() const {
- if (HeaderWords(*this) == 1)
+ if (HeaderWords(*this) == 1) {
+ if (isAArch64)
+ return (Data[0] & 0xf8000000) >> 27;
return (Data[0] & 0xf0000000) >> 28;
+ }
return (Data[1] & 0x00ff0000) >> 16;
}
@@ -373,6 +427,8 @@ struct ExceptionDataRecord {
};
inline size_t HeaderWords(const ExceptionDataRecord &XR) {
+ if (XR.isAArch64)
+ return (XR.Data[0] & 0xffc00000) ? 1 : 2;
return (XR.Data[0] & 0xff800000) ? 1 : 2;
}
}
diff --git a/contrib/llvm/include/llvm/Support/Allocator.h b/contrib/llvm/include/llvm/Support/Allocator.h
index 184ac491b1f1..42d08378a677 100644
--- a/contrib/llvm/include/llvm/Support/Allocator.h
+++ b/contrib/llvm/include/llvm/Support/Allocator.h
@@ -21,6 +21,7 @@
#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
@@ -283,6 +284,60 @@ public:
size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
+ /// \return An index uniquely and reproducibly identifying
+ /// an input pointer \p Ptr in the given allocator.
+ /// The returned value is negative iff the object is inside a custom-size
+ /// slab.
+ /// Returns an empty optional if the pointer is not found in the allocator.
+ llvm::Optional<int64_t> identifyObject(const void *Ptr) {
+ const char *P = static_cast<const char *>(Ptr);
+ int64_t InSlabIdx = 0;
+ for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
+ const char *S = static_cast<const char *>(Slabs[Idx]);
+ if (P >= S && P < S + computeSlabSize(Idx))
+ return InSlabIdx + static_cast<int64_t>(P - S);
+ InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
+ }
+
+ // Use negative index to denote custom sized slabs.
+ int64_t InCustomSizedSlabIdx = -1;
+ for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
+ const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
+ size_t Size = CustomSizedSlabs[Idx].second;
+ if (P >= S && P < S + Size)
+ return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
+ InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
+ }
+ return None;
+ }
+
+ /// A wrapper around identifyObject that additionally asserts that
+ /// the object is indeed within the allocator.
+ /// \return An index uniquely and reproducibly identifying
+ /// an input pointer \p Ptr in the given allocator.
+ int64_t identifyKnownObject(const void *Ptr) {
+ Optional<int64_t> Out = identifyObject(Ptr);
+ assert(Out && "Wrong allocator used");
+ return *Out;
+ }
+
+ /// A wrapper around identifyKnownObject. Accepts type information
+ /// about the object and produces a smaller identifier by relying on
+ /// the alignment information. Note that sub-classes may have different
+ /// alignment, so the most base class should be passed as template parameter
+ /// in order to obtain correct results. For that reason automatic template
+ /// parameter deduction is disabled.
+ /// \return An index uniquely and reproducibly identifying
+ /// an input pointer \p Ptr in the given allocator. This identifier is
+ /// different from the ones produced by identifyObject and
+ /// identifyAlignedObject.
+ template <typename T>
+ int64_t identifyKnownAlignedObject(const void *Ptr) {
+ int64_t Out = identifyKnownObject(Ptr);
+ assert(Out % alignof(T) == 0 && "Wrong alignment information");
+ return Out / alignof(T);
+ }
+
size_t getTotalMemory() const {
size_t TotalMemory = 0;
for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
diff --git a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
index d1571cb37fc6..7c110fcb6a4b 100644
--- a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
+++ b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
@@ -96,21 +96,32 @@ public:
explicit VarStreamArray(const Extractor &E) : E(E) {}
- explicit VarStreamArray(BinaryStreamRef Stream) : Stream(Stream) {}
+ explicit VarStreamArray(BinaryStreamRef Stream, uint32_t Skew = 0)
+ : Stream(Stream), Skew(Skew) {}
- VarStreamArray(BinaryStreamRef Stream, const Extractor &E)
- : Stream(Stream), E(E) {}
+ VarStreamArray(BinaryStreamRef Stream, const Extractor &E, uint32_t Skew = 0)
+ : Stream(Stream), E(E), Skew(Skew) {}
Iterator begin(bool *HadError = nullptr) const {
- return Iterator(*this, E, HadError);
+ return Iterator(*this, E, Skew, nullptr);
}
bool valid() const { return Stream.valid(); }
+ uint32_t skew() const { return Skew; }
Iterator end() const { return Iterator(E); }
bool empty() const { return Stream.getLength() == 0; }
+ VarStreamArray<ValueType, Extractor> substream(uint32_t Begin,
+ uint32_t End) const {
+ assert(Begin >= Skew);
+ // We should never cut off the beginning of the stream since it might be
+ // skewed, meaning the initial bytes are important.
+ BinaryStreamRef NewStream = Stream.slice(0, End);
+ return {NewStream, E, Begin};
+ }
+
/// given an offset into the array's underlying stream, return an
/// iterator to the record at that offset. This is considered unsafe
/// since the behavior is undefined if \p Offset does not refer to the
@@ -123,11 +134,17 @@ public:
Extractor &getExtractor() { return E; }
BinaryStreamRef getUnderlyingStream() const { return Stream; }
- void setUnderlyingStream(BinaryStreamRef S) { Stream = S; }
+ void setUnderlyingStream(BinaryStreamRef S, uint32_t Skew = 0) {
+ Stream = S;
+ this->Skew = Skew;
+ }
+
+ void drop_front() { Skew += begin()->length(); }
private:
BinaryStreamRef Stream;
Extractor E;
+ uint32_t Skew;
};
template <typename ValueType, typename Extractor>
@@ -139,10 +156,6 @@ class VarStreamArrayIterator
public:
VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
- bool *HadError)
- : VarStreamArrayIterator(Array, E, 0, HadError) {}
-
- VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
uint32_t Offset, bool *HadError)
: IterRef(Array.Stream.drop_front(Offset)), Extract(E),
Array(&Array), AbsOffset(Offset), HadError(HadError) {
diff --git a/contrib/llvm/include/llvm/Support/BinaryStreamReader.h b/contrib/llvm/include/llvm/Support/BinaryStreamReader.h
index fe77b550c453..392958de30d5 100644
--- a/contrib/llvm/include/llvm/Support/BinaryStreamReader.h
+++ b/contrib/llvm/include/llvm/Support/BinaryStreamReader.h
@@ -203,11 +203,12 @@ public:
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
template <typename T, typename U>
- Error readArray(VarStreamArray<T, U> &Array, uint32_t Size) {
+ Error readArray(VarStreamArray<T, U> &Array, uint32_t Size,
+ uint32_t Skew = 0) {
BinaryStreamRef S;
if (auto EC = readStreamRef(S, Size))
return EC;
- Array.setUnderlyingStream(S);
+ Array.setUnderlyingStream(S, Skew);
return Error::success();
}
diff --git a/contrib/llvm/include/llvm/Support/BuryPointer.h b/contrib/llvm/include/llvm/Support/BuryPointer.h
new file mode 100644
index 000000000000..53f1f395b922
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/BuryPointer.h
@@ -0,0 +1,30 @@
+//===- llvm/Support/BuryPointer.h - Memory Manipulation/Leak ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BURYPOINTER_H
+#define LLVM_SUPPORT_BURYPOINTER_H
+
+#include <memory>
+
+namespace llvm {
+
+// In tools that will exit soon anyway, going through the process of explicitly
+// deallocating resources can be unnecessary - better to leak the resources and
+// let the OS clean them up when the process ends. Use this function to ensure
+// the memory is not misdiagnosed as an unintentional leak by leak detection
+// tools (this is achieved by preserving pointers to the object in a globally
+// visible array).
+void BuryPointer(const void *Ptr);
+template <typename T> void BuryPointer(std::unique_ptr<T> Ptr) {
+ BuryPointer(Ptr.release());
+}
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CFGUpdate.h b/contrib/llvm/include/llvm/Support/CFGUpdate.h
new file mode 100644
index 000000000000..63c24a3d2a20
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/CFGUpdate.h
@@ -0,0 +1,118 @@
+//===- CFGUpdate.h - Encode a CFG Edge Update. ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a CFG Edge Update: Insert or Delete, and two Nodes as the
+// Edge ends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CFGUPDATE_H
+#define LLVM_SUPPORT_CFGUPDATE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace cfg {
+enum class UpdateKind : unsigned char { Insert, Delete };
+
+template <typename NodePtr> class Update {
+ using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
+ NodePtr From;
+ NodeKindPair ToAndKind;
+
+public:
+ Update(UpdateKind Kind, NodePtr From, NodePtr To)
+ : From(From), ToAndKind(To, Kind) {}
+
+ UpdateKind getKind() const { return ToAndKind.getInt(); }
+ NodePtr getFrom() const { return From; }
+ NodePtr getTo() const { return ToAndKind.getPointer(); }
+ bool operator==(const Update &RHS) const {
+ return From == RHS.From && ToAndKind == RHS.ToAndKind;
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << (getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
+ getFrom()->printAsOperand(OS, false);
+ OS << " -> ";
+ getTo()->printAsOperand(OS, false);
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
+#endif
+};
+
+// LegalizeUpdates function simplifies updates assuming a graph structure.
+// This function serves double purpose:
+// a) It removes redundant updates, which makes it easier to reverse-apply
+// them when traversing CFG.
+// b) It optimizes away updates that cancel each other out, as the end result
+// is the same.
+template <typename NodePtr>
+void LegalizeUpdates(ArrayRef<Update<NodePtr>> AllUpdates,
+ SmallVectorImpl<Update<NodePtr>> &Result,
+ bool InverseGraph) {
+ // Count the total number of insertions of each edge.
+ // Each insertion adds 1 and deletion subtracts 1. The end number should be
+ // one of {-1 (deletion), 0 (NOP), +1 (insertion)}. Otherwise, the sequence
+ // of updates contains multiple updates of the same kind and we assert for
+ // that case.
+ SmallDenseMap<std::pair<NodePtr, NodePtr>, int, 4> Operations;
+ Operations.reserve(AllUpdates.size());
+
+ for (const auto &U : AllUpdates) {
+ NodePtr From = U.getFrom();
+ NodePtr To = U.getTo();
+ if (InverseGraph)
+ std::swap(From, To); // Reverse edge for postdominators.
+
+ Operations[{From, To}] += (U.getKind() == UpdateKind::Insert ? 1 : -1);
+ }
+
+ Result.clear();
+ Result.reserve(Operations.size());
+ for (auto &Op : Operations) {
+ const int NumInsertions = Op.second;
+ assert(std::abs(NumInsertions) <= 1 && "Unbalanced operations!");
+ if (NumInsertions == 0)
+ continue;
+ const UpdateKind UK =
+ NumInsertions > 0 ? UpdateKind::Insert : UpdateKind::Delete;
+ Result.push_back({UK, Op.first.first, Op.first.second});
+ }
+
+ // Make the order consistent by not relying on pointer values within the
+ // set. Reuse the old Operations map.
+ // In the future, we should sort by something else to minimize the amount
+ // of work needed to perform the series of updates.
+ for (size_t i = 0, e = AllUpdates.size(); i != e; ++i) {
+ const auto &U = AllUpdates[i];
+ if (!InverseGraph)
+ Operations[{U.getFrom(), U.getTo()}] = int(i);
+ else
+ Operations[{U.getTo(), U.getFrom()}] = int(i);
+ }
+
+ llvm::sort(Result,
+ [&Operations](const Update<NodePtr> &A, const Update<NodePtr> &B) {
+ return Operations[{A.getFrom(), A.getTo()}] >
+ Operations[{B.getFrom(), B.getTo()}];
+ });
+}
+
+} // end namespace cfg
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_CFGUPDATE_H
diff --git a/contrib/llvm/include/llvm/Support/Chrono.h b/contrib/llvm/include/llvm/Support/Chrono.h
index 994068af3771..57677e8d5cf1 100644
--- a/contrib/llvm/include/llvm/Support/Chrono.h
+++ b/contrib/llvm/include/llvm/Support/Chrono.h
@@ -47,6 +47,14 @@ toTimePoint(std::time_t T) {
return time_point_cast<seconds>(system_clock::from_time_t(T));
}
+/// Convert a std::time_t + nanoseconds to a TimePoint
+LLVM_ATTRIBUTE_ALWAYS_INLINE inline TimePoint<>
+toTimePoint(std::time_t T, uint32_t nsec) {
+ using namespace std::chrono;
+ return time_point_cast<nanoseconds>(system_clock::from_time_t(T))
+ + nanoseconds(nsec);
+}
+
} // namespace sys
raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);
diff --git a/contrib/llvm/include/llvm/Support/CodeGen.h b/contrib/llvm/include/llvm/Support/CodeGen.h
index 5f9e33129587..22e74167266c 100644
--- a/contrib/llvm/include/llvm/Support/CodeGen.h
+++ b/contrib/llvm/include/llvm/Support/CodeGen.h
@@ -25,7 +25,7 @@ namespace llvm {
// Code model types.
namespace CodeModel {
// Sync changes with CodeGenCWrappers.h.
- enum Model { Small, Kernel, Medium, Large };
+ enum Model { Tiny, Small, Kernel, Medium, Large };
}
namespace PICLevel {
@@ -57,6 +57,11 @@ namespace llvm {
};
}
+ // Specify effect of frame pointer elimination optimization.
+ namespace FramePointer {
+ enum FP {All, NonLeaf, None};
+ }
+
} // end llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm/include/llvm/Support/CommandLine.h
index 799b41fbf8b0..a8ad89384d17 100644
--- a/contrib/llvm/include/llvm/Support/CommandLine.h
+++ b/contrib/llvm/include/llvm/Support/CommandLine.h
@@ -56,9 +56,18 @@ namespace cl {
// Returns true on success. Otherwise, this will print the error message to
// stderr and exit if \p Errs is not set (nullptr by default), or print the
// error message to \p Errs and return false if \p Errs is provided.
+//
+// If EnvVar is not nullptr, command-line options are also parsed from the
+// environment variable named by EnvVar. Precedence is given to occurrences
+// from argv. This precedence is currently implemented by parsing argv after
+// the environment variable, so it is only implemented correctly for options
+// that give precedence to later occurrences. If your program supports options
+// that give precedence to earlier occurrences, you will need to extend this
+// function to support it correctly.
bool ParseCommandLineOptions(int argc, const char *const *argv,
StringRef Overview = "",
- raw_ostream *Errs = nullptr);
+ raw_ostream *Errs = nullptr,
+ const char *EnvVar = nullptr);
//===----------------------------------------------------------------------===//
// ParseEnvironmentOptions - Environment variable option processing alternate
@@ -147,6 +156,9 @@ enum OptionHidden { // Control whether -help shows this option
// enabled, and used, the value for the flag comes from the suffix of the
// argument.
//
+// AlwaysPrefix - Only allow the behavior enabled by the Prefix flag and reject
+// the Option=Value form.
+//
// Grouping - With this option enabled, multiple letter options are allowed to
// bunch together with only a single hyphen for the whole group. This allows
// emulation of the behavior that ls uses for example: ls -la === ls -l -a
@@ -156,7 +168,8 @@ enum FormattingFlags {
NormalFormatting = 0x00, // Nothing special
Positional = 0x01, // Is a positional argument, no '-' required
Prefix = 0x02, // Can this option directly prefix its value?
- Grouping = 0x03 // Can this option group with other options?
+ AlwaysPrefix = 0x03, // Can this option only directly prefix its value?
+ Grouping = 0x04 // Can this option group with other options?
};
enum MiscFlags { // Miscellaneous flags to adjust argument
@@ -256,7 +269,7 @@ class Option {
// detail representing the non-value
unsigned Value : 2;
unsigned HiddenFlag : 2; // enum OptionHidden
- unsigned Formatting : 2; // enum FormattingFlags
+ unsigned Formatting : 3; // enum FormattingFlags
unsigned Misc : 3;
unsigned Position = 0; // Position of last occurrence of the option
unsigned AdditionalVals = 0; // Greater than 0 for multi-valued option.
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
index 4de815fe61d7..14e4d6e97140 100644
--- a/contrib/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -133,6 +133,19 @@
#define LLVM_NODISCARD
#endif
+// Indicate that a non-static, non-const C++ member function reinitializes
+// the entire object to a known state, independent of the previous state of
+// the object.
+//
+// The clang-tidy check bugprone-use-after-move recognizes this attribute as a
+// marker that a moved-from object has left the indeterminate state and can be
+// reused.
+#if __has_cpp_attribute(clang::reinitializes)
+#define LLVM_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
+#else
+#define LLVM_ATTRIBUTE_REINITIALIZES
+#endif
+
// Some compilers warn about unused functions. When a function is sometimes
// used or not depending on build settings (e.g. a function only called from
// within "assert"), this attribute can be used to suppress such warnings.
@@ -519,7 +532,7 @@ namespace llvm {
/// reduced default alignment.
inline void *allocate_buffer(size_t Size, size_t Alignment) {
return ::operator new(Size
-#if __cpp_aligned_new
+#ifdef __cpp_aligned_new
,
std::align_val_t(Alignment)
#endif
@@ -535,11 +548,11 @@ inline void *allocate_buffer(size_t Size, size_t Alignment) {
/// most likely using the above helper.
inline void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment) {
::operator delete(Ptr
-#if __cpp_sized_deallocation
+#ifdef __cpp_sized_deallocation
,
Size
#endif
-#if __cpp_aligned_new
+#ifdef __cpp_aligned_new
,
std::align_val_t(Alignment)
#endif
diff --git a/contrib/llvm/include/llvm/Support/Compression.h b/contrib/llvm/include/llvm/Support/Compression.h
index 2d191abe4b1a..f7258f4bf8f8 100644
--- a/contrib/llvm/include/llvm/Support/Compression.h
+++ b/contrib/llvm/include/llvm/Support/Compression.h
@@ -23,17 +23,15 @@ class StringRef;
namespace zlib {
-enum CompressionLevel {
- NoCompression,
- DefaultCompression,
- BestSpeedCompression,
- BestSizeCompression
-};
+static constexpr int NoCompression = 0;
+static constexpr int BestSpeedCompression = 1;
+static constexpr int DefaultCompression = 6;
+static constexpr int BestSizeCompression = 9;
bool isAvailable();
Error compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
- CompressionLevel Level = DefaultCompression);
+ int Level = DefaultCompression);
Error uncompress(StringRef InputBuffer, char *UncompressedBuffer,
size_t &UncompressedSize);
@@ -49,4 +47,3 @@ uint32_t crc32(StringRef Buffer);
} // End of namespace llvm
#endif
-
diff --git a/contrib/llvm/include/llvm/Support/Debug.h b/contrib/llvm/include/llvm/Support/Debug.h
index 980abfb0e8da..df86dbb82414 100644
--- a/contrib/llvm/include/llvm/Support/Debug.h
+++ b/contrib/llvm/include/llvm/Support/Debug.h
@@ -94,6 +94,10 @@ extern bool VerifyDomInfo;
///
extern bool VerifyLoopInfo;
+/// Enables verification of MemorySSA.
+///
+extern bool VerifyMemorySSA;
+
///\}
/// EnableDebugBuffering - This defaults to false. If true, the debug
diff --git a/contrib/llvm/include/llvm/Support/DebugCounter.h b/contrib/llvm/include/llvm/Support/DebugCounter.h
index 83bd5a06c94a..6eadd5c6aeff 100644
--- a/contrib/llvm/include/llvm/Support/DebugCounter.h
+++ b/contrib/llvm/include/llvm/Support/DebugCounter.h
@@ -55,6 +55,8 @@ namespace llvm {
class DebugCounter {
public:
+ ~DebugCounter();
+
/// Returns a reference to the singleton instance.
static DebugCounter &instance();
diff --git a/contrib/llvm/include/llvm/Support/Error.h b/contrib/llvm/include/llvm/Support/Error.h
index 8015cab45a06..ee2cbeec97a8 100644
--- a/contrib/llvm/include/llvm/Support/Error.h
+++ b/contrib/llvm/include/llvm/Support/Error.h
@@ -14,8 +14,9 @@
#ifndef LLVM_SUPPORT_ERROR_H
#define LLVM_SUPPORT_ERROR_H
-#include "llvm/ADT/SmallVector.h"
+#include "llvm-c/Error.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/abi-breaking.h"
@@ -155,9 +156,10 @@ private:
/// they're moved-assigned or constructed from Success values that have already
/// been checked. This enforces checking through all levels of the call stack.
class LLVM_NODISCARD Error {
- // ErrorList needs to be able to yank ErrorInfoBase pointers out of this
- // class to add to the error list.
+ // Both ErrorList and FileError need to be able to yank ErrorInfoBase
+ // pointers out of this class to add to the error list.
friend class ErrorList;
+ friend class FileError;
// handleErrors needs to be able to set the Checked flag.
template <typename... HandlerTs>
@@ -167,6 +169,9 @@ class LLVM_NODISCARD Error {
// error.
template <typename T> friend class Expected;
+ // wrap needs to be able to steal the payload.
+ friend LLVMErrorRef wrap(Error);
+
protected:
/// Create a success value. Prefer using 'Error::success()' for readability
Error() {
@@ -317,7 +322,7 @@ private:
/// Subclass of Error for the sole purpose of identifying the success path in
/// the type system. This allows to catch invalid conversion to Expected<T> at
/// compile time.
-class ErrorSuccess : public Error {};
+class ErrorSuccess final : public Error {};
inline ErrorSuccess Error::success() { return ErrorSuccess(); }
@@ -339,6 +344,8 @@ template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&... Args) {
template <typename ThisErrT, typename ParentErrT = ErrorInfoBase>
class ErrorInfo : public ParentErrT {
public:
+ using ParentErrT::ParentErrT; // inherit constructors
+
static const void *classID() { return &ThisErrT::ID; }
const void *dynamicClassID() const override { return &ThisErrT::ID; }
@@ -946,10 +953,14 @@ Expected<T> handleExpected(Expected<T> ValOrErr, RecoveryFtor &&RecoveryPath,
/// will be printed before the first one is logged. A newline will be printed
/// after each error.
///
+/// This function is compatible with the helpers from Support/WithColor.h. You
+/// can pass any of them as the OS. Please consider using them instead of
+/// including 'error: ' in the ErrorBanner.
+///
/// This is useful in the base level of your program to allow clean termination
/// (allowing clean deallocation of resources, etc.), while reporting error
/// information to the user.
-void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner);
+void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner = {});
/// Write all error messages (if any) in E to a string. The newline character
/// is used to separate error messages.
@@ -1055,6 +1066,8 @@ private:
class ECError : public ErrorInfo<ECError> {
friend Error errorCodeToError(std::error_code);
+ virtual void anchor() override;
+
public:
void setErrorCode(std::error_code EC) { this->EC = EC; }
std::error_code convertToErrorCode() const override { return EC; }
@@ -1106,10 +1119,33 @@ template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) {
/// StringError is useful in cases where the client is not expected to be able
/// to consume the specific error message programmatically (for example, if the
/// error message is to be presented to the user).
+///
+/// StringError can also be used when additional information is to be printed
+/// along with an error_code message. Depending on the constructor called, this
+/// class can either display:
+/// 1. the error_code message (ECError behavior)
+/// 2. a string
+/// 3. the error_code message and a string
+///
+/// These behaviors are useful when subtyping is required; for example, when a
+/// specific library needs an explicit error type. In the example below,
+/// PDBError is derived from StringError:
+///
+/// @code{.cpp}
+/// Expected<int> foo() {
+/// return llvm::make_error<PDBError>(pdb_error_code::dia_failed_loading,
+/// "Additional information");
+/// }
+/// @endcode
+///
class StringError : public ErrorInfo<StringError> {
public:
static char ID;
+ // Prints EC + S and converts to EC
+ StringError(std::error_code EC, const Twine &S = Twine());
+
+ // Prints S and converts to EC
StringError(const Twine &S, std::error_code EC);
void log(raw_ostream &OS) const override;
@@ -1120,6 +1156,7 @@ public:
private:
std::string Msg;
std::error_code EC;
+ const bool PrintMsgOnly = false;
};
/// Create formatted StringError object.
@@ -1134,6 +1171,53 @@ Error createStringError(std::error_code EC, char const *Fmt,
Error createStringError(std::error_code EC, char const *Msg);
+/// This class wraps a filename and another Error.
+///
+/// In some cases, an error needs to live along a 'source' name, in order to
+/// show more detailed information to the user.
+class FileError final : public ErrorInfo<FileError> {
+
+ friend Error createFileError(std::string, Error);
+
+public:
+ void log(raw_ostream &OS) const override {
+ assert(Err && !FileName.empty() && "Trying to log after takeError().");
+ OS << "'" << FileName << "': ";
+ Err->log(OS);
+ }
+
+ Error takeError() { return Error(std::move(Err)); }
+
+ std::error_code convertToErrorCode() const override;
+
+ // Used by ErrorInfo::classID.
+ static char ID;
+
+private:
+ FileError(std::string F, std::unique_ptr<ErrorInfoBase> E) {
+ assert(E && "Cannot create FileError from Error success value.");
+ assert(!F.empty() &&
+ "The file name provided to FileError must not be empty.");
+ FileName = F;
+ Err = std::move(E);
+ }
+
+ static Error build(std::string F, Error E) {
+ return Error(std::unique_ptr<FileError>(new FileError(F, E.takePayload())));
+ }
+
+ std::string FileName;
+ std::unique_ptr<ErrorInfoBase> Err;
+};
+
+/// Concatenate a source file path and/or name with an Error. The resulting
+/// Error is unchecked.
+inline Error createFileError(std::string F, Error E) {
+ return FileError::build(F, std::move(E));
+}
+
+Error createFileError(std::string F, ErrorSuccess) = delete;
+
/// Helper for check-and-exit error handling.
///
/// For tool use only. NOT FOR USE IN LIBRARY CODE.
@@ -1183,6 +1267,17 @@ private:
std::function<int(const Error &)> GetExitCode;
};
+/// Conversion from Error to LLVMErrorRef for C error bindings.
+inline LLVMErrorRef wrap(Error Err) {
+ return reinterpret_cast<LLVMErrorRef>(Err.takePayload().release());
+}
+
+/// Conversion from LLVMErrorRef to Error for C error bindings.
+inline Error unwrap(LLVMErrorRef ErrRef) {
+ return Error(std::unique_ptr<ErrorInfoBase>(
+ reinterpret_cast<ErrorInfoBase *>(ErrRef)));
+}
+
} // end namespace llvm
#endif // LLVM_SUPPORT_ERROR_H
diff --git a/contrib/llvm/include/llvm/Support/ErrorHandling.h b/contrib/llvm/include/llvm/Support/ErrorHandling.h
index 39cbfed2436a..fec39e59a717 100644
--- a/contrib/llvm/include/llvm/Support/ErrorHandling.h
+++ b/contrib/llvm/include/llvm/Support/ErrorHandling.h
@@ -112,8 +112,8 @@ void install_out_of_memory_new_handler();
/// in the unwind chain.
///
/// If no error handler is installed (default), then a bad_alloc exception
-/// is thrown, if LLVM is compiled with exception support, otherwise an assertion
-/// is called.
+/// is thrown, if LLVM is compiled with exception support, otherwise an
+/// assertion is called.
void report_bad_alloc_error(const char *Reason, bool GenCrashDiag = true);
/// This function calls abort(), and prints the optional message to stderr.
diff --git a/contrib/llvm/include/llvm/Support/FileCheck.h b/contrib/llvm/include/llvm/Support/FileCheck.h
new file mode 100644
index 000000000000..4061a26e22c5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileCheck.h
@@ -0,0 +1,282 @@
+//==-- llvm/Support/FileCheck.h ---------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file has some utilities to use FileCheck as an API
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILECHECK_H
+#define LLVM_SUPPORT_FILECHECK_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/SourceMgr.h"
+#include <vector>
+#include <map>
+
+namespace llvm {
+
+/// Contains info about various FileCheck options.
+struct FileCheckRequest {
+ std::vector<std::string> CheckPrefixes;
+ bool NoCanonicalizeWhiteSpace = false;
+ std::vector<std::string> ImplicitCheckNot;
+ std::vector<std::string> GlobalDefines;
+ bool AllowEmptyInput = false;
+ bool MatchFullLines = false;
+ bool EnableVarScope = false;
+ bool AllowDeprecatedDagOverlap = false;
+ bool Verbose = false;
+ bool VerboseVerbose = false;
+};
+
+
+//===----------------------------------------------------------------------===//
+// Pattern Handling Code.
+//===----------------------------------------------------------------------===//
+
+namespace Check {
+
+enum FileCheckKind {
+ CheckNone = 0,
+ CheckPlain,
+ CheckNext,
+ CheckSame,
+ CheckNot,
+ CheckDAG,
+ CheckLabel,
+ CheckEmpty,
+
+ /// Indicates the pattern only matches the end of file. This is used for
+ /// trailing CHECK-NOTs.
+ CheckEOF,
+
+ /// Marks when parsing found a -NOT check combined with another CHECK suffix.
+ CheckBadNot,
+
+ /// Marks when parsing found a -COUNT directive with invalid count value.
+ CheckBadCount
+};
+
+class FileCheckType {
+ FileCheckKind Kind;
+ int Count; ///< optional Count for some checks
+
+public:
+ FileCheckType(FileCheckKind Kind = CheckNone) : Kind(Kind), Count(1) {}
+ FileCheckType(const FileCheckType &) = default;
+
+ operator FileCheckKind() const { return Kind; }
+
+ int getCount() const { return Count; }
+ FileCheckType &setCount(int C);
+
+ std::string getDescription(StringRef Prefix) const;
+};
+}
+
+struct FileCheckDiag;
+
+class FileCheckPattern {
+ SMLoc PatternLoc;
+
+ /// A fixed string to match as the pattern or empty if this pattern requires
+ /// a regex match.
+ StringRef FixedStr;
+
+ /// A regex string to match as the pattern or empty if this pattern requires
+ /// a fixed string to match.
+ std::string RegExStr;
+
+ /// Entries in this vector map to uses of a variable in the pattern, e.g.
+ /// "foo[[bar]]baz". In this case, the RegExStr will contain "foobaz" and
+ /// we'll get an entry in this vector that tells us to insert the value of
+ /// bar at offset 3.
+ std::vector<std::pair<StringRef, unsigned>> VariableUses;
+
+ /// Maps definitions of variables to their parenthesized capture numbers.
+ ///
+ /// E.g. for the pattern "foo[[bar:.*]]baz", VariableDefs will map "bar" to
+ /// 1.
+ std::map<StringRef, unsigned> VariableDefs;
+
+ Check::FileCheckType CheckTy;
+
+ /// Contains the number of line this pattern is in.
+ unsigned LineNumber;
+
+public:
+ explicit FileCheckPattern(Check::FileCheckType Ty)
+ : CheckTy(Ty) {}
+
+ /// Returns the location in source code.
+ SMLoc getLoc() const { return PatternLoc; }
+
+ bool ParsePattern(StringRef PatternStr, StringRef Prefix, SourceMgr &SM,
+ unsigned LineNumber, const FileCheckRequest &Req);
+ size_t Match(StringRef Buffer, size_t &MatchLen,
+ StringMap<StringRef> &VariableTable) const;
+ void PrintVariableUses(const SourceMgr &SM, StringRef Buffer,
+ const StringMap<StringRef> &VariableTable,
+ SMRange MatchRange = None) const;
+ void PrintFuzzyMatch(const SourceMgr &SM, StringRef Buffer,
+ const StringMap<StringRef> &VariableTable,
+ std::vector<FileCheckDiag> *Diags) const;
+
+ bool hasVariable() const {
+ return !(VariableUses.empty() && VariableDefs.empty());
+ }
+
+ Check::FileCheckType getCheckTy() const { return CheckTy; }
+
+ int getCount() const { return CheckTy.getCount(); }
+
+private:
+ bool AddRegExToRegEx(StringRef RS, unsigned &CurParen, SourceMgr &SM);
+ void AddBackrefToRegEx(unsigned BackrefNum);
+ unsigned
+ ComputeMatchDistance(StringRef Buffer,
+ const StringMap<StringRef> &VariableTable) const;
+ bool EvaluateExpression(StringRef Expr, std::string &Value) const;
+ size_t FindRegexVarEnd(StringRef Str, SourceMgr &SM);
+};
+
+//===----------------------------------------------------------------------===//
+/// Summary of a FileCheck diagnostic.
+//===----------------------------------------------------------------------===//
+
+struct FileCheckDiag {
+ /// What is the FileCheck directive for this diagnostic?
+ Check::FileCheckType CheckTy;
+ /// Where is the FileCheck directive for this diagnostic?
+ unsigned CheckLine, CheckCol;
+ /// What type of match result does this diagnostic describe?
+ ///
+ /// A directive's supplied pattern is said to be either expected or excluded
+ /// depending on whether the pattern must have or must not have a match in
+ /// order for the directive to succeed. For example, a CHECK directive's
+ /// pattern is expected, and a CHECK-NOT directive's pattern is excluded.
+ /// All match result types whose names end with "Excluded" are for excluded
+ /// patterns, and all others are for expected patterns.
+ ///
+ /// There might be more than one match result for a single pattern. For
+ /// example, there might be several discarded matches
+ /// (MatchFoundButDiscarded) before either a good match
+ /// (MatchFoundAndExpected) or a failure to match (MatchNoneButExpected),
+ /// and there might be a fuzzy match (MatchFuzzy) after the latter.
+ enum MatchType {
+ /// Indicates a good match for an expected pattern.
+ MatchFoundAndExpected,
+ /// Indicates a match for an excluded pattern.
+ MatchFoundButExcluded,
+ /// Indicates a match for an expected pattern, but the match is on the
+ /// wrong line.
+ MatchFoundButWrongLine,
+ /// Indicates a discarded match for an expected pattern.
+ MatchFoundButDiscarded,
+ /// Indicates no match for an excluded pattern.
+ MatchNoneAndExcluded,
+ /// Indicates no match for an expected pattern, but this might follow good
+ /// matches when multiple matches are expected for the pattern, or it might
+ /// follow discarded matches for the pattern.
+ MatchNoneButExpected,
+ /// Indicates a fuzzy match that serves as a suggestion for the next
+ /// intended match for an expected pattern with too few or no good matches.
+ MatchFuzzy,
+ } MatchTy;
+ /// The search range if MatchTy is MatchNoneAndExcluded or
+ /// MatchNoneButExpected, or the match range otherwise.
+ unsigned InputStartLine;
+ unsigned InputStartCol;
+ unsigned InputEndLine;
+ unsigned InputEndCol;
+ FileCheckDiag(const SourceMgr &SM, const Check::FileCheckType &CheckTy,
+ SMLoc CheckLoc, MatchType MatchTy, SMRange InputRange);
+};
+
+//===----------------------------------------------------------------------===//
+// Check Strings.
+//===----------------------------------------------------------------------===//
+
+/// A check that we found in the input file.
+struct FileCheckString {
+ /// The pattern to match.
+ FileCheckPattern Pat;
+
+ /// Which prefix name this check matched.
+ StringRef Prefix;
+
+ /// The location in the match file that the check string was specified.
+ SMLoc Loc;
+
+ /// All of the strings that are disallowed from occurring between this match
+ /// string and the previous one (or start of file).
+ std::vector<FileCheckPattern> DagNotStrings;
+
+ FileCheckString(const FileCheckPattern &P, StringRef S, SMLoc L)
+ : Pat(P), Prefix(S), Loc(L) {}
+
+ size_t Check(const SourceMgr &SM, StringRef Buffer, bool IsLabelScanMode,
+ size_t &MatchLen, StringMap<StringRef> &VariableTable,
+ FileCheckRequest &Req, std::vector<FileCheckDiag> *Diags) const;
+
+ bool CheckNext(const SourceMgr &SM, StringRef Buffer) const;
+ bool CheckSame(const SourceMgr &SM, StringRef Buffer) const;
+ bool CheckNot(const SourceMgr &SM, StringRef Buffer,
+ const std::vector<const FileCheckPattern *> &NotStrings,
+ StringMap<StringRef> &VariableTable,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const;
+ size_t CheckDag(const SourceMgr &SM, StringRef Buffer,
+ std::vector<const FileCheckPattern *> &NotStrings,
+ StringMap<StringRef> &VariableTable,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const;
+};
+
+/// FileCheck class takes the request and exposes various methods that
+/// use information from the request.
+class FileCheck {
+ FileCheckRequest Req;
+
+public:
+ FileCheck(FileCheckRequest Req) : Req(Req) {}
+
+ // Combines the check prefixes into a single regex so that we can efficiently
+ // scan for any of the set.
+ //
+ // The semantics are that the longest-match wins which matches our regex
+ // library.
+ Regex buildCheckPrefixRegex();
+
+ /// Read the check file, which specifies the sequence of expected strings.
+ ///
+ /// The strings are added to the CheckStrings vector. Returns true in case of
+ /// an error, false otherwise.
+ bool ReadCheckFile(SourceMgr &SM, StringRef Buffer, Regex &PrefixRE,
+ std::vector<FileCheckString> &CheckStrings);
+
+ bool ValidateCheckPrefixes();
+
+ /// Canonicalize whitespaces in the file. Line endings are replaced with
+ /// UNIX-style '\n'.
+ StringRef CanonicalizeFile(MemoryBuffer &MB,
+ SmallVectorImpl<char> &OutputBuffer);
+
+ /// Check the input to FileCheck provided in the \p Buffer against the \p
+ /// CheckStrings read from the check file.
+ ///
+ /// Returns false if the input fails to satisfy the checks.
+ bool CheckInput(SourceMgr &SM, StringRef Buffer,
+ ArrayRef<FileCheckString> CheckStrings,
+ std::vector<FileCheckDiag> *Diags = nullptr);
+};
+} // namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
index ee8cbb730878..68226ca55502 100644
--- a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
+++ b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
@@ -76,6 +76,10 @@ public:
/// deallocates the buffer and the target file is never written.
virtual ~FileOutputBuffer() {}
+ /// This removes the temporary file (unless it already was committed)
+ /// but keeps the memory mapping alive.
+ virtual void discard() {}
+
protected:
FileOutputBuffer(StringRef Path) : FinalPath(Path) {}
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
index 02db4596bf1c..d2042f51d8c1 100644
--- a/contrib/llvm/include/llvm/Support/FileSystem.h
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -160,6 +160,8 @@ protected:
#if defined(LLVM_ON_UNIX)
time_t fs_st_atime = 0;
time_t fs_st_mtime = 0;
+ uint32_t fs_st_atime_nsec = 0;
+ uint32_t fs_st_mtime_nsec = 0;
uid_t fs_st_uid = 0;
gid_t fs_st_gid = 0;
off_t fs_st_size = 0;
@@ -180,9 +182,12 @@ public:
explicit basic_file_status(file_type Type) : Type(Type) {}
#if defined(LLVM_ON_UNIX)
- basic_file_status(file_type Type, perms Perms, time_t ATime, time_t MTime,
+ basic_file_status(file_type Type, perms Perms, time_t ATime,
+ uint32_t ATimeNSec, time_t MTime, uint32_t MTimeNSec,
uid_t UID, gid_t GID, off_t Size)
- : fs_st_atime(ATime), fs_st_mtime(MTime), fs_st_uid(UID), fs_st_gid(GID),
+ : fs_st_atime(ATime), fs_st_mtime(MTime),
+ fs_st_atime_nsec(ATimeNSec), fs_st_mtime_nsec(MTimeNSec),
+ fs_st_uid(UID), fs_st_gid(GID),
fs_st_size(Size), Type(Type), Perms(Perms) {}
#elif defined(_WIN32)
basic_file_status(file_type Type, perms Perms, uint32_t LastAccessTimeHigh,
@@ -199,7 +204,20 @@ public:
// getters
file_type type() const { return Type; }
perms permissions() const { return Perms; }
+
+ /// The file access time as reported from the underlying file system.
+ ///
+ /// Also see comments on \c getLastModificationTime() related to the precision
+ /// of the returned value.
TimePoint<> getLastAccessedTime() const;
+
+ /// The file modification time as reported from the underlying file system.
+ ///
+ /// The returned value allows for nanosecond precision but the actual
+ /// resolution is an implementation detail of the underlying file system.
+ /// There is no guarantee for what kind of resolution you can expect, the
+ /// resolution can differ across platforms and even across mountpoints on the
+ /// same machine.
TimePoint<> getLastModificationTime() const;
#if defined(LLVM_ON_UNIX)
@@ -247,8 +265,11 @@ public:
#if defined(LLVM_ON_UNIX)
file_status(file_type Type, perms Perms, dev_t Dev, nlink_t Links, ino_t Ino,
- time_t ATime, time_t MTime, uid_t UID, gid_t GID, off_t Size)
- : basic_file_status(Type, Perms, ATime, MTime, UID, GID, Size),
+ time_t ATime, uint32_t ATimeNSec,
+ time_t MTime, uint32_t MTimeNSec,
+ uid_t UID, gid_t GID, off_t Size)
+ : basic_file_status(Type, Perms, ATime, ATimeNSec, MTime, MTimeNSec,
+ UID, GID, Size),
fs_st_dev(Dev), fs_st_nlinks(Links), fs_st_ino(Ino) {}
#elif defined(_WIN32)
file_status(file_type Type, perms Perms, uint32_t LinkCount,
@@ -281,10 +302,7 @@ public:
/// relative/../path => <current-directory>/relative/../path
///
/// @param path A path that is modified to be an absolute path.
-/// @returns errc::success if \a path has been made absolute, otherwise a
-/// platform-specific error_code.
-std::error_code make_absolute(const Twine &current_directory,
- SmallVectorImpl<char> &path);
+void make_absolute(const Twine &current_directory, SmallVectorImpl<char> &path);
/// Make \a path an absolute path.
///
@@ -349,6 +367,12 @@ std::error_code create_hard_link(const Twine &to, const Twine &from);
std::error_code real_path(const Twine &path, SmallVectorImpl<char> &output,
bool expand_tilde = false);
+/// Expands ~ expressions to the user's home directory. On Unix ~user
+/// directories are resolved as well.
+///
+/// @param path The path to resolve.
+void expand_tilde(const Twine &path, SmallVectorImpl<char> &output);
+
/// Get the current path.
///
/// @param result Holds the current path on return.
@@ -666,7 +690,15 @@ inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
/// @returns errc::success if the file times were successfully set, otherwise a
/// platform-specific error_code or errc::function_not_supported on
/// platforms where the functionality isn't available.
-std::error_code setLastModificationAndAccessTime(int FD, TimePoint<> Time);
+std::error_code setLastAccessAndModificationTime(int FD, TimePoint<> AccessTime,
+ TimePoint<> ModificationTime);
+
+/// Simpler version that sets both file modification and access time to the same
+/// time.
+inline std::error_code setLastAccessAndModificationTime(int FD,
+ TimePoint<> Time) {
+ return setLastAccessAndModificationTime(FD, Time, Time);
+}
/// Is status available?
///
@@ -693,7 +725,7 @@ enum CreationDisposition : unsigned {
/// * If it does not already exist, create a new file.
CD_CreateNew = 1,
- /// CD_OpenAlways - When opening a file:
+ /// CD_OpenExisting - When opening a file:
/// * If it already exists, open the file with the offset set to 0.
/// * If it does not already exist, fail.
CD_OpenExisting = 2,
@@ -1092,38 +1124,51 @@ std::string getMainExecutable(const char *argv0, void *MainExecAddr);
/// @name Iterators
/// @{
-/// directory_entry - A single entry in a directory. Caches the status either
-/// from the result of the iteration syscall, or the first time status is
-/// called.
+/// directory_entry - A single entry in a directory.
class directory_entry {
+ // FIXME: different platforms make different information available "for free"
+ // when traversing a directory. The design of this class wraps most of the
+ // information in basic_file_status, so on platforms where we can't populate
+ // that whole structure, callers end up paying for a stat().
+ // std::filesystem::directory_entry may be a better model.
std::string Path;
- bool FollowSymlinks;
- basic_file_status Status;
+ file_type Type; // Most platforms can provide this.
+ bool FollowSymlinks; // Affects the behavior of status().
+ basic_file_status Status; // If available.
public:
- explicit directory_entry(const Twine &path, bool follow_symlinks = true,
- basic_file_status st = basic_file_status())
- : Path(path.str()), FollowSymlinks(follow_symlinks), Status(st) {}
+ explicit directory_entry(const Twine &Path, bool FollowSymlinks = true,
+ file_type Type = file_type::type_unknown,
+ basic_file_status Status = basic_file_status())
+ : Path(Path.str()), Type(Type), FollowSymlinks(FollowSymlinks),
+ Status(Status) {}
directory_entry() = default;
- void assign(const Twine &path, basic_file_status st = basic_file_status()) {
- Path = path.str();
- Status = st;
- }
-
- void replace_filename(const Twine &filename,
- basic_file_status st = basic_file_status());
+ void replace_filename(const Twine &Filename, file_type Type,
+ basic_file_status Status = basic_file_status());
const std::string &path() const { return Path; }
+ // Get basic information about entry file (a subset of fs::status()).
+ // On most platforms this is a stat() call.
+ // On windows the information was already retrieved from the directory.
ErrorOr<basic_file_status> status() const;
+ // Get the type of this file.
+ // On most platforms (Linux/Mac/Windows/BSD), this was already retrieved.
+ // On some platforms (e.g. Solaris) this is a stat() call.
+ file_type type() const {
+ if (Type != file_type::type_unknown)
+ return Type;
+ auto S = status();
+ return S ? S->type() : file_type::type_unknown;
+ }
- bool operator==(const directory_entry& rhs) const { return Path == rhs.Path; }
- bool operator!=(const directory_entry& rhs) const { return !(*this == rhs); }
- bool operator< (const directory_entry& rhs) const;
- bool operator<=(const directory_entry& rhs) const;
- bool operator> (const directory_entry& rhs) const;
- bool operator>=(const directory_entry& rhs) const;
+ bool operator==(const directory_entry& RHS) const { return Path == RHS.Path; }
+ bool operator!=(const directory_entry& RHS) const { return !(*this == RHS); }
+ bool operator< (const directory_entry& RHS) const;
+ bool operator<=(const directory_entry& RHS) const;
+ bool operator> (const directory_entry& RHS) const;
+ bool operator>=(const directory_entry& RHS) const;
};
namespace detail {
@@ -1161,7 +1206,6 @@ public:
SmallString<128> path_storage;
ec = detail::directory_iterator_construct(
*State, path.toStringRef(path_storage), FollowSymlinks);
- update_error_code_for_current_entry(ec);
}
explicit directory_iterator(const directory_entry &de, std::error_code &ec,
@@ -1170,7 +1214,6 @@ public:
State = std::make_shared<detail::DirIterState>();
ec = detail::directory_iterator_construct(
*State, de.path(), FollowSymlinks);
- update_error_code_for_current_entry(ec);
}
/// Construct end iterator.
@@ -1179,7 +1222,6 @@ public:
// No operator++ because we need error_code.
directory_iterator &increment(std::error_code &ec) {
ec = directory_iterator_increment(*State);
- update_error_code_for_current_entry(ec);
return *this;
}
@@ -1199,26 +1241,6 @@ public:
bool operator!=(const directory_iterator &RHS) const {
return !(*this == RHS);
}
- // Other members as required by
- // C++ Std, 24.1.1 Input iterators [input.iterators]
-
-private:
- // Checks if current entry is valid and populates error code. For example,
- // current entry may not exist due to broken symbol links.
- void update_error_code_for_current_entry(std::error_code &ec) {
- // Bail out if error has already occured earlier to avoid overwriting it.
- if (ec)
- return;
-
- // Empty directory entry is used to mark the end of an interation, it's not
- // an error.
- if (State->CurrentEntry == directory_entry())
- return;
-
- ErrorOr<basic_file_status> status = State->CurrentEntry.status();
- if (!status)
- ec = status.getError();
- }
};
namespace detail {
@@ -1256,8 +1278,15 @@ public:
if (State->HasNoPushRequest)
State->HasNoPushRequest = false;
else {
- ErrorOr<basic_file_status> status = State->Stack.top()->status();
- if (status && is_directory(*status)) {
+ file_type type = State->Stack.top()->type();
+ if (type == file_type::symlink_file && Follow) {
+ // Resolve the symlink: is it a directory to recurse into?
+ ErrorOr<basic_file_status> status = State->Stack.top()->status();
+ if (status)
+ type = status->type();
+ // Otherwise broken symlink, and we'll continue.
+ }
+ if (type == file_type::directory_file) {
State->Stack.push(directory_iterator(*State->Stack.top(), ec, Follow));
if (State->Stack.top() != end_itr) {
++State->Level;
@@ -1321,8 +1350,6 @@ public:
bool operator!=(const recursive_directory_iterator &RHS) const {
return !(*this == RHS);
}
- // Other members as required by
- // C++ Std, 24.1.1 Input iterators [input.iterators]
};
/// @}
diff --git a/contrib/llvm/include/llvm/Support/FormatVariadicDetails.h b/contrib/llvm/include/llvm/Support/FormatVariadicDetails.h
index 56dda430efda..e8bd90f50941 100644
--- a/contrib/llvm/include/llvm/Support/FormatVariadicDetails.h
+++ b/contrib/llvm/include/llvm/Support/FormatVariadicDetails.h
@@ -21,6 +21,8 @@ class Error;
namespace detail {
class format_adapter {
+ virtual void anchor();
+
protected:
virtual ~format_adapter() {}
diff --git a/contrib/llvm/include/llvm/Support/GenericDomTree.h b/contrib/llvm/include/llvm/Support/GenericDomTree.h
index c716e4a4d300..b3018bac310a 100644
--- a/contrib/llvm/include/llvm/Support/GenericDomTree.h
+++ b/contrib/llvm/include/llvm/Support/GenericDomTree.h
@@ -24,6 +24,14 @@
#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
#define LLVM_SUPPORT_GENERICDOMTREE_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CFGUpdate.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -32,13 +40,6 @@
#include <type_traits>
#include <utility>
#include <vector>
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -192,6 +193,10 @@ template <typename DomTreeT>
void Calculate(DomTreeT &DT);
template <typename DomTreeT>
+void CalculateWithUpdates(DomTreeT &DT,
+ ArrayRef<typename DomTreeT::UpdateType> Updates);
+
+template <typename DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
typename DomTreeT::NodePtr To);
@@ -199,36 +204,6 @@ template <typename DomTreeT>
void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
typename DomTreeT::NodePtr To);
-// UpdateKind and Update are used by the batch update API and it's easiest to
-// define them here.
-enum class UpdateKind : unsigned char { Insert, Delete };
-
-template <typename NodePtr>
-struct Update {
- using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
-
- NodePtr From;
- NodeKindPair ToAndKind;
-
- Update(UpdateKind Kind, NodePtr From, NodePtr To)
- : From(From), ToAndKind(To, Kind) {}
-
- UpdateKind getKind() const { return ToAndKind.getInt(); }
- NodePtr getFrom() const { return From; }
- NodePtr getTo() const { return ToAndKind.getPointer(); }
- bool operator==(const Update &RHS) const {
- return From == RHS.From && ToAndKind == RHS.ToAndKind;
- }
-
- friend raw_ostream &operator<<(raw_ostream &OS, const Update &U) {
- OS << (U.getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
- U.getFrom()->printAsOperand(OS, false);
- OS << " -> ";
- U.getTo()->printAsOperand(OS, false);
- return OS;
- }
-};
-
template <typename DomTreeT>
void ApplyUpdates(DomTreeT &DT,
ArrayRef<typename DomTreeT::UpdateType> Updates);
@@ -254,8 +229,8 @@ class DominatorTreeBase {
using ParentType = typename std::remove_pointer<ParentPtr>::type;
static constexpr bool IsPostDominator = IsPostDom;
- using UpdateType = DomTreeBuilder::Update<NodePtr>;
- using UpdateKind = DomTreeBuilder::UpdateKind;
+ using UpdateType = cfg::Update<NodePtr>;
+ using UpdateKind = cfg::UpdateKind;
static constexpr UpdateKind Insert = UpdateKind::Insert;
static constexpr UpdateKind Delete = UpdateKind::Delete;
@@ -759,6 +734,11 @@ public:
DomTreeBuilder::Calculate(*this);
}
+ void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
+ Parent = &Func;
+ DomTreeBuilder::CalculateWithUpdates(*this, Updates);
+ }
+
/// verify - checks if the tree is correct. There are 3 level of verification:
/// - Full -- verifies if the tree is correct by making sure all the
/// properties (including the parent and the sibling property)
diff --git a/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h b/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h
index 977f209f92b3..971e8305a112 100644
--- a/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h
@@ -71,6 +71,7 @@ struct SemiNCAInfo {
DenseMap<NodePtr, InfoRec> NodeToInfo;
using UpdateT = typename DomTreeT::UpdateType;
+ using UpdateKind = typename DomTreeT::UpdateKind;
struct BatchUpdateInfo {
SmallVector<UpdateT, 4> Updates;
using NodePtrAndKind = PointerIntPair<NodePtr, 1, UpdateKind>;
@@ -1166,7 +1167,8 @@ struct SemiNCAInfo {
}
BatchUpdateInfo BUI;
- LegalizeUpdates(Updates, BUI.Updates);
+ LLVM_DEBUG(dbgs() << "Legalizing " << BUI.Updates.size() << " updates\n");
+ cfg::LegalizeUpdates<NodePtr>(Updates, BUI.Updates, IsPostDom);
const size_t NumLegalized = BUI.Updates.size();
BUI.FutureSuccessors.reserve(NumLegalized);
@@ -1182,8 +1184,11 @@ struct SemiNCAInfo {
LLVM_DEBUG(dbgs() << "About to apply " << NumLegalized << " updates\n");
LLVM_DEBUG(if (NumLegalized < 32) for (const auto &U
- : reverse(BUI.Updates)) dbgs()
- << '\t' << U << "\n");
+ : reverse(BUI.Updates)) {
+ dbgs() << "\t";
+ U.dump();
+ dbgs() << "\n";
+ });
LLVM_DEBUG(dbgs() << "\n");
// Recalculate the DominatorTree when the number of updates
@@ -1207,76 +1212,11 @@ struct SemiNCAInfo {
ApplyNextUpdate(DT, BUI);
}
- // This function serves double purpose:
- // a) It removes redundant updates, which makes it easier to reverse-apply
- // them when traversing CFG.
- // b) It optimizes away updates that cancel each other out, as the end result
- // is the same.
- //
- // It relies on the property of the incremental updates that says that the
- // order of updates doesn't matter. This allows us to reorder them and end up
- // with the exact same DomTree every time.
- //
- // Following the same logic, the function doesn't care about the order of
- // input updates, so it's OK to pass it an unordered sequence of updates, that
- // doesn't make sense when applied sequentially, eg. performing double
- // insertions or deletions and then doing an opposite update.
- //
- // In the future, it should be possible to schedule updates in way that
- // minimizes the amount of work needed done during incremental updates.
- static void LegalizeUpdates(ArrayRef<UpdateT> AllUpdates,
- SmallVectorImpl<UpdateT> &Result) {
- LLVM_DEBUG(dbgs() << "Legalizing " << AllUpdates.size() << " updates\n");
- // Count the total number of inserions of each edge.
- // Each insertion adds 1 and deletion subtracts 1. The end number should be
- // one of {-1 (deletion), 0 (NOP), +1 (insertion)}. Otherwise, the sequence
- // of updates contains multiple updates of the same kind and we assert for
- // that case.
- SmallDenseMap<std::pair<NodePtr, NodePtr>, int, 4> Operations;
- Operations.reserve(AllUpdates.size());
-
- for (const auto &U : AllUpdates) {
- NodePtr From = U.getFrom();
- NodePtr To = U.getTo();
- if (IsPostDom) std::swap(From, To); // Reverse edge for postdominators.
-
- Operations[{From, To}] += (U.getKind() == UpdateKind::Insert ? 1 : -1);
- }
-
- Result.clear();
- Result.reserve(Operations.size());
- for (auto &Op : Operations) {
- const int NumInsertions = Op.second;
- assert(std::abs(NumInsertions) <= 1 && "Unbalanced operations!");
- if (NumInsertions == 0) continue;
- const UpdateKind UK =
- NumInsertions > 0 ? UpdateKind::Insert : UpdateKind::Delete;
- Result.push_back({UK, Op.first.first, Op.first.second});
- }
-
- // Make the order consistent by not relying on pointer values within the
- // set. Reuse the old Operations map.
- // In the future, we should sort by something else to minimize the amount
- // of work needed to perform the series of updates.
- for (size_t i = 0, e = AllUpdates.size(); i != e; ++i) {
- const auto &U = AllUpdates[i];
- if (!IsPostDom)
- Operations[{U.getFrom(), U.getTo()}] = int(i);
- else
- Operations[{U.getTo(), U.getFrom()}] = int(i);
- }
-
- llvm::sort(Result.begin(), Result.end(),
- [&Operations](const UpdateT &A, const UpdateT &B) {
- return Operations[{A.getFrom(), A.getTo()}] >
- Operations[{B.getFrom(), B.getTo()}];
- });
- }
-
static void ApplyNextUpdate(DomTreeT &DT, BatchUpdateInfo &BUI) {
assert(!BUI.Updates.empty() && "No updates to apply!");
UpdateT CurrentUpdate = BUI.Updates.pop_back_val();
- LLVM_DEBUG(dbgs() << "Applying update: " << CurrentUpdate << "\n");
+ LLVM_DEBUG(dbgs() << "Applying update: ");
+ LLVM_DEBUG(CurrentUpdate.dump(); dbgs() << "\n");
// Move to the next snapshot of the CFG by removing the reverse-applied
// current update. Since updates are performed in the same order they are
@@ -1460,10 +1400,9 @@ struct SemiNCAInfo {
// Make a copy and sort it such that it is possible to check if there are
// no gaps between DFS numbers of adjacent children.
SmallVector<TreeNodePtr, 8> Children(Node->begin(), Node->end());
- llvm::sort(Children.begin(), Children.end(),
- [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
- return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
- });
+ llvm::sort(Children, [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
+ return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
+ });
auto PrintChildrenError = [Node, &Children, PrintNodeAndDFSNums](
const TreeNodePtr FirstCh, const TreeNodePtr SecondCh) {
@@ -1650,6 +1589,25 @@ void Calculate(DomTreeT &DT) {
SemiNCAInfo<DomTreeT>::CalculateFromScratch(DT, nullptr);
}
+template <typename DomTreeT>
+void CalculateWithUpdates(DomTreeT &DT,
+ ArrayRef<typename DomTreeT::UpdateType> Updates) {
+ // TODO: Move BUI creation in common method, reuse in ApplyUpdates.
+ typename SemiNCAInfo<DomTreeT>::BatchUpdateInfo BUI;
+ LLVM_DEBUG(dbgs() << "Legalizing " << BUI.Updates.size() << " updates\n");
+ cfg::LegalizeUpdates<typename DomTreeT::NodePtr>(Updates, BUI.Updates,
+ DomTreeT::IsPostDominator);
+ const size_t NumLegalized = BUI.Updates.size();
+ BUI.FutureSuccessors.reserve(NumLegalized);
+ BUI.FuturePredecessors.reserve(NumLegalized);
+ for (auto &U : BUI.Updates) {
+ BUI.FutureSuccessors[U.getFrom()].push_back({U.getTo(), U.getKind()});
+ BUI.FuturePredecessors[U.getTo()].push_back({U.getFrom(), U.getKind()});
+ }
+
+ SemiNCAInfo<DomTreeT>::CalculateFromScratch(DT, &BUI);
+}
+
template <class DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
typename DomTreeT::NodePtr To) {
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
index c9a9f409c522..02d98bec16e2 100644
--- a/contrib/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -27,6 +27,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DOTGraphTraits.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstddef>
@@ -320,14 +321,32 @@ raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
std::string createGraphFilename(const Twine &Name, int &FD);
+/// Writes graph into a provided {@code Filename}.
+/// If {@code Filename} is empty, generates a random one.
+/// \return The resulting filename, or an empty string if writing
+/// failed.
template <typename GraphType>
std::string WriteGraph(const GraphType &G, const Twine &Name,
- bool ShortNames = false, const Twine &Title = "") {
+ bool ShortNames = false,
+ const Twine &Title = "",
+ std::string Filename = "") {
int FD;
// Windows can't always handle long paths, so limit the length of the name.
std::string N = Name.str();
N = N.substr(0, std::min<std::size_t>(N.size(), 140));
- std::string Filename = createGraphFilename(N, FD);
+ if (Filename.empty()) {
+ Filename = createGraphFilename(N, FD);
+ } else {
+ std::error_code EC = sys::fs::openFileForWrite(Filename, FD);
+
+ // Writing over an existing file is not considered an error.
+ if (EC == std::errc::file_exists) {
+ errs() << "file exists, overwriting" << "\n";
+ } else if (EC) {
+ errs() << "error writing into file" << "\n";
+ return "";
+ }
+ }
raw_fd_ostream O(FD, /*shouldClose=*/ true);
if (FD == -1) {
diff --git a/contrib/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h b/contrib/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h
new file mode 100644
index 000000000000..34eb9f7deaaf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h
@@ -0,0 +1,93 @@
+//===--- ItaniumManglingCanonicalizer.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a class for computing equivalence classes of mangled names
+// given a set of equivalences between name fragments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
+#define LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
+
+#include "llvm/ADT/StringRef.h"
+
+#include <cstddef>
+
+namespace llvm {
+/// Canonicalizer for mangled names.
+///
+/// This class allows specifying a list of "equivalent" manglings. For example,
+/// you can specify that Ss is equivalent to
+/// NSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE
+/// and then manglings that refer to libstdc++'s 'std::string' will be
+/// considered equivalent to manglings that are the same except that they refer
+/// to libc++'s 'std::string'.
+///
+/// This can be used when data (eg, profiling data) is available for a version
+/// of a program built in a different configuration, with correspondingly
+/// different manglings.
+class ItaniumManglingCanonicalizer {
+public:
+ ItaniumManglingCanonicalizer();
+ ItaniumManglingCanonicalizer(const ItaniumManglingCanonicalizer &) = delete;
+ void operator=(const ItaniumManglingCanonicalizer &) = delete;
+ ~ItaniumManglingCanonicalizer();
+
+ enum class EquivalenceError {
+ Success,
+
+ /// Both the equivalent manglings have already been used as components of
+ /// some other mangling we've looked at. It's too late to add this
+ /// equivalence.
+ ManglingAlreadyUsed,
+
+ /// The first equivalent mangling is invalid.
+ InvalidFirstMangling,
+
+ /// The second equivalent mangling is invalid.
+ InvalidSecondMangling,
+ };
+
+ enum class FragmentKind {
+ /// The mangling fragment is a <name> (or a predefined <substitution>).
+ Name,
+ /// The mangling fragment is a <type>.
+ Type,
+ /// The mangling fragment is an <encoding>.
+ Encoding,
+ };
+
+ /// Add an equivalence between \p First and \p Second. Both manglings must
+ /// live at least as long as the canonicalizer.
+ EquivalenceError addEquivalence(FragmentKind Kind, StringRef First,
+ StringRef Second);
+
+ using Key = uintptr_t;
+
+ /// Form a canonical key for the specified mangling. They key will be the
+ /// same for all equivalent manglings, and different for any two
+ /// non-equivalent manglings, but is otherwise unspecified.
+ ///
+ /// Returns Key() if (and only if) the mangling is not a valid Itanium C++
+ /// ABI mangling.
+ ///
+ /// The string denoted by Mangling must live as long as the canonicalizer.
+ Key canonicalize(StringRef Mangling);
+
+ /// Find a canonical key for the specified mangling, if one has already been
+ /// formed. Otherwise returns Key().
+ Key lookup(StringRef Mangling);
+
+private:
+ struct Impl;
+ Impl *P;
+};
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
diff --git a/contrib/llvm/include/llvm/Support/JSON.h b/contrib/llvm/include/llvm/Support/JSON.h
index da3c5ea0b25d..7a04fd52bc50 100644
--- a/contrib/llvm/include/llvm/Support/JSON.h
+++ b/contrib/llvm/include/llvm/Support/JSON.h
@@ -294,9 +294,13 @@ public:
Value(json::Array &&Elements) : Type(T_Array) {
create<json::Array>(std::move(Elements));
}
+ template <typename Elt>
+ Value(const std::vector<Elt> &C) : Value(json::Array(C)) {}
Value(json::Object &&Properties) : Type(T_Object) {
create<json::Object>(std::move(Properties));
}
+ template <typename Elt>
+ Value(const std::map<std::string, Elt> &C) : Value(json::Object(C)) {}
// Strings: types with value semantics. Must be valid UTF-8.
Value(std::string V) : Type(T_String) {
if (LLVM_UNLIKELY(!isUTF8(V))) {
@@ -452,7 +456,10 @@ private:
new (reinterpret_cast<T *>(Union.buffer)) T(std::forward<U>(V)...);
}
template <typename T> T &as() const {
- return *reinterpret_cast<T *>(Union.buffer);
+ // Using this two-step static_cast via void * instead of reinterpret_cast
+ // silences a -Wstrict-aliasing false positive from GCC6 and earlier.
+ void *Storage = static_cast<void *>(Union.buffer);
+ return *static_cast<T *>(Storage);
}
template <typename Indenter>
diff --git a/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h b/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
index a0a5a52d206e..2a1075c9a48d 100644
--- a/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -147,6 +147,7 @@ public:
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
+ friend class GISelInstProfileBuilder;
private:
/// LLT is packed into 64 bits as follows:
@@ -231,6 +232,11 @@ private:
maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo);
}
}
+
+ uint64_t getUniqueRAWLLTData() const {
+ return ((uint64_t)RawData) << 2 | ((uint64_t)IsPointer) << 1 |
+ ((uint64_t)IsVector);
+ }
};
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
@@ -250,8 +256,7 @@ template<> struct DenseMapInfo<LLT> {
return Invalid;
}
static inline unsigned getHashValue(const LLT &Ty) {
- uint64_t Val = ((uint64_t)Ty.RawData) << 2 | ((uint64_t)Ty.IsPointer) << 1 |
- ((uint64_t)Ty.IsVector);
+ uint64_t Val = Ty.getUniqueRAWLLTData();
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
diff --git a/contrib/llvm/include/llvm/Support/MSVCErrorWorkarounds.h b/contrib/llvm/include/llvm/Support/MSVCErrorWorkarounds.h
new file mode 100644
index 000000000000..053ecf64d1e9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MSVCErrorWorkarounds.h
@@ -0,0 +1,84 @@
+//===--- MSVCErrorWorkarounds.h - Enable future<Error> in MSVC --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MSVC's promise/future implementation requires types to be default
+// constructible, so this header provides analogues of Error an Expected
+// that are default constructed in a safely destructible state.
+//
+// FIXME: Kill off this header and migrate all users to Error/Expected once we
+// move to MSVC versions that support non-default-constructible types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H
+#define LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H
+
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+// A default-constructible llvm::Error that is suitable for use with MSVC's
+// std::future implementation which requires default constructible types.
+class MSVCPError : public Error {
+public:
+ MSVCPError() { (void)!!*this; }
+
+ MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}
+
+ MSVCPError &operator=(MSVCPError Other) {
+ Error::operator=(std::move(Other));
+ return *this;
+ }
+
+ MSVCPError(Error Err) : Error(std::move(Err)) {}
+};
+
+// A default-constructible llvm::Expected that is suitable for use with MSVC's
+// std::future implementation, which requires default constructible types.
+template <typename T> class MSVCPExpected : public Expected<T> {
+public:
+ MSVCPExpected()
+ : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
+ consumeError(this->takeError());
+ }
+
+ MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}
+
+ MSVCPExpected &operator=(MSVCPExpected &&Other) {
+ Expected<T>::operator=(std::move(Other));
+ return *this;
+ }
+
+ MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}
+
+ template <typename OtherT>
+ MSVCPExpected(
+ OtherT &&Val,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Val)) {}
+
+ template <class OtherT>
+ MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+
+ template <class OtherT>
+ explicit MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H
diff --git a/contrib/llvm/include/llvm/Support/Path.h b/contrib/llvm/include/llvm/Support/Path.h
index c4cc93721d7e..76de887b7cb4 100644
--- a/contrib/llvm/include/llvm/Support/Path.h
+++ b/contrib/llvm/include/llvm/Support/Path.h
@@ -361,22 +361,6 @@ void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
/// @result True if a home directory is set, false otherwise.
bool home_directory(SmallVectorImpl<char> &result);
-/// Get the user's cache directory.
-///
-/// Expect the resulting path to be a directory shared with other
-/// applications/services used by the user. Params \p Path1 to \p Path3 can be
-/// used to append additional directory names to the resulting path. Recommended
-/// pattern is <user_cache_directory>/<vendor>/<application>.
-///
-/// @param Result Holds the resulting path.
-/// @param Path1 Additional path to be appended to the user's cache directory
-/// path. "" can be used to append nothing.
-/// @param Path2 Second additional path to be appended.
-/// @param Path3 Third additional path to be appended.
-/// @result True if a cache directory path is set, false otherwise.
-bool user_cache_directory(SmallVectorImpl<char> &Result, const Twine &Path1,
- const Twine &Path2 = "", const Twine &Path3 = "");
-
/// Has root name?
///
/// root_name != ""
diff --git a/contrib/llvm/include/llvm/Support/ScopedPrinter.h b/contrib/llvm/include/llvm/Support/ScopedPrinter.h
index 062439b4f7db..34c1a287ee10 100644
--- a/contrib/llvm/include/llvm/Support/ScopedPrinter.h
+++ b/contrib/llvm/include/llvm/Support/ScopedPrinter.h
@@ -138,7 +138,7 @@ public:
}
}
- llvm::sort(SetFlags.begin(), SetFlags.end(), &flagName<TFlag>);
+ llvm::sort(SetFlags, &flagName<TFlag>);
startLine() << Label << " [ (" << hex(Value) << ")\n";
for (const auto &Flag : SetFlags) {
diff --git a/contrib/llvm/include/llvm/Support/SymbolRemappingReader.h b/contrib/llvm/include/llvm/Support/SymbolRemappingReader.h
new file mode 100644
index 000000000000..b457b9e817e4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SymbolRemappingReader.h
@@ -0,0 +1,133 @@
+//===- SymbolRemappingReader.h - Read symbol remapping file -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions needed for reading and applying symbol
+// remapping files.
+//
+// Support is provided only for the Itanium C++ name mangling scheme for now.
+//
+// NOTE: If you are making changes to this file format, please remember
+// to document them in the Clang documentation at
+// tools/clang/docs/UsersManual.rst.
+//
+// File format
+// -----------
+//
+// The symbol remappings are written as an ASCII text file. Blank lines and
+// lines starting with a # are ignored. All other lines specify a kind of
+// mangled name fragment, along with two fragments of that kind that should
+// be treated as equivalent, separated by spaces.
+//
+// See http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling for a
+// description of the Itanium name mangling scheme.
+//
+// The accepted fragment kinds are:
+//
+// * name A <name>, such as 6foobar or St3__1
+// * type A <type>, such as Ss or N4llvm9StringRefE
+// * encoding An <encoding> (a complete mangling without the leading _Z)
+//
+// For example:
+//
+// # Ignore int / long differences to treat symbols from 32-bit and 64-bit
+// # builds with differing size_t / ptrdiff_t / intptr_t as equivalent.
+// type i l
+// type j m
+//
+// # Ignore differences between libc++ and libstdc++, and between libstdc++'s
+// # C++98 and C++11 ABIs.
+// name 3std St3__1
+// name 3std St7__cxx11
+//
+// # Remap a function overload to a specialization of a template (including
+// # any local symbols declared within it).
+// encoding N2NS1fEi N2NS1fIiEEvT_
+//
+// # Substitutions must be remapped separately from namespace 'std' for now.
+// name Sa NSt3__19allocatorE
+// name Sb NSt3__112basic_stringE
+// type Ss NSt3__112basic_stringIcSt11char_traitsIcESaE
+// # ...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H
+#define LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ItaniumManglingCanonicalizer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+class SymbolRemappingParseError : public ErrorInfo<SymbolRemappingParseError> {
+public:
+ SymbolRemappingParseError(StringRef File, int64_t Line, Twine Message)
+ : File(File), Line(Line), Message(Message.str()) {}
+
+ void log(llvm::raw_ostream &OS) const override {
+ OS << File << ':' << Line << ": " << Message;
+ }
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+
+ StringRef getFileName() const { return File; }
+ int64_t getLineNum() const { return Line; }
+ StringRef getMessage() const { return Message; }
+
+ static char ID;
+
+private:
+ std::string File;
+ int64_t Line;
+ std::string Message;
+};
+
+/// Reader for symbol remapping files.
+///
+/// Remaps the symbol names in profile data to match those in the program
+/// according to a set of rules specified in a given file.
+class SymbolRemappingReader {
+public:
+ /// Read remappings from the given buffer, which must live as long as
+ /// the remapper.
+ Error read(MemoryBuffer &B);
+
+ /// A Key represents an equivalence class of symbol names.
+ using Key = uintptr_t;
+
+ /// Construct a key for the given symbol, or return an existing one if an
+ /// equivalent name has already been inserted. The symbol name must live
+ /// as long as the remapper.
+ ///
+ /// The result will be Key() if the name cannot be remapped (typically
+ /// because it is not a valid mangled name).
+ Key insert(StringRef FunctionName) {
+ return Canonicalizer.canonicalize(FunctionName);
+ }
+
+ /// Map the given symbol name into the key for the corresponding equivalence
+ /// class.
+ ///
+ /// The result will typically be Key() if no equivalent symbol has been
+ /// inserted, but this is not guaranteed: a Key different from all keys ever
+ /// returned by \c insert may be returned instead.
+ Key lookup(StringRef FunctionName) {
+ return Canonicalizer.lookup(FunctionName);
+ }
+
+private:
+ ItaniumManglingCanonicalizer Canonicalizer;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H
diff --git a/contrib/llvm/include/llvm/Support/TargetOpcodes.def b/contrib/llvm/include/llvm/Support/TargetOpcodes.def
index 63491a5f01d2..3e8193a5cdcf 100644
--- a/contrib/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/contrib/llvm/include/llvm/Support/TargetOpcodes.def
@@ -258,6 +258,17 @@ HANDLE_TARGET_OPCODE(G_INSERT)
/// larger register.
HANDLE_TARGET_OPCODE(G_MERGE_VALUES)
+/// Generic instruction to create a vector value from a number of scalar
+/// components.
+HANDLE_TARGET_OPCODE(G_BUILD_VECTOR)
+
+/// Generic instruction to create a vector value from a number of scalar
+/// components, which have types larger than the result vector elt type.
+HANDLE_TARGET_OPCODE(G_BUILD_VECTOR_TRUNC)
+
+/// Generic instruction to create a vector by concatenating multiple vectors.
+HANDLE_TARGET_OPCODE(G_CONCAT_VECTORS)
+
/// Generic pointer to int conversion.
HANDLE_TARGET_OPCODE(G_PTRTOINT)
@@ -268,6 +279,12 @@ HANDLE_TARGET_OPCODE(G_INTTOPTR)
/// COPY is the relevant instruction.
HANDLE_TARGET_OPCODE(G_BITCAST)
+/// INTRINSIC trunc intrinsic.
+HANDLE_TARGET_OPCODE(G_INTRINSIC_TRUNC)
+
+/// INTRINSIC round intrinsic.
+HANDLE_TARGET_OPCODE(G_INTRINSIC_ROUND)
+
/// Generic load (including anyext load)
HANDLE_TARGET_OPCODE(G_LOAD)
@@ -356,10 +373,18 @@ HANDLE_TARGET_OPCODE(G_FCMP)
/// Generic select.
HANDLE_TARGET_OPCODE(G_SELECT)
+/// Generic unsigned add instruction, consuming the normal operands and
+/// producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_UADDO)
+
/// Generic unsigned add instruction, consuming the normal operands plus a carry
/// flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_UADDE)
+/// Generic unsigned sub instruction, consuming the normal operands and
+/// producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_USUBO)
+
/// Generic unsigned subtract instruction, consuming the normal operands plus a
/// carry flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_USUBE)
@@ -368,10 +393,18 @@ HANDLE_TARGET_OPCODE(G_USUBE)
/// flag.
HANDLE_TARGET_OPCODE(G_SADDO)
+/// Generic signed add instruction, consuming the normal operands plus a carry
+/// flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_SADDE)
+
/// Generic signed subtract instruction, producing the result and a signed
/// overflow flag.
HANDLE_TARGET_OPCODE(G_SSUBO)
+/// Generic signed sub instruction, consuming the normal operands plus a carry
+/// flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_SSUBE)
+
/// Generic unsigned multiply instruction, producing the result and a signed
/// overflow flag.
HANDLE_TARGET_OPCODE(G_UMULO)
@@ -421,6 +454,9 @@ HANDLE_TARGET_OPCODE(G_FLOG)
/// Floating point base-2 logarithm of a value.
HANDLE_TARGET_OPCODE(G_FLOG2)
+/// Floating point base-10 logarithm of a value.
+HANDLE_TARGET_OPCODE(G_FLOG10)
+
/// Generic FP negation.
HANDLE_TARGET_OPCODE(G_FNEG)
@@ -464,9 +500,27 @@ HANDLE_TARGET_OPCODE(G_EXTRACT_VECTOR_ELT)
/// Generic shufflevector.
HANDLE_TARGET_OPCODE(G_SHUFFLE_VECTOR)
+/// Generic count trailing zeroes.
+HANDLE_TARGET_OPCODE(G_CTTZ)
+
+/// Same as above, undefined for zero inputs.
+HANDLE_TARGET_OPCODE(G_CTTZ_ZERO_UNDEF)
+
+/// Generic count leading zeroes.
+HANDLE_TARGET_OPCODE(G_CTLZ)
+
+/// Same as above, undefined for zero inputs.
+HANDLE_TARGET_OPCODE(G_CTLZ_ZERO_UNDEF)
+
+/// Generic count bits.
+HANDLE_TARGET_OPCODE(G_CTPOP)
+
/// Generic byte swap.
HANDLE_TARGET_OPCODE(G_BSWAP)
+/// Floating point ceil.
+HANDLE_TARGET_OPCODE(G_FCEIL)
+
/// Generic AddressSpaceCast.
HANDLE_TARGET_OPCODE(G_ADDRSPACE_CAST)
diff --git a/contrib/llvm/include/llvm/Support/TargetParser.h b/contrib/llvm/include/llvm/Support/TargetParser.h
index 08ad42dda3eb..ace11ed410a3 100644
--- a/contrib/llvm/include/llvm/Support/TargetParser.h
+++ b/contrib/llvm/include/llvm/Support/TargetParser.h
@@ -18,211 +18,20 @@
// FIXME: vector is used because that's what clang uses for subtarget feature
// lists, but SmallVector would probably be better
#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ARMTargetParser.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include <vector>
namespace llvm {
class StringRef;
-// Target specific information into their own namespaces. These should be
-// generated from TableGen because the information is already there, and there
-// is where new information about targets will be added.
+// Target specific information in their own namespaces.
+// (ARM/AArch64 are declared in ARM/AArch64TargetParser.h)
+// These should be generated from TableGen because the information is already
+// there, and there is where new information about targets will be added.
// FIXME: To TableGen this we need to make some table generated files available
// even if the back-end is not compiled with LLVM, plus we need to create a new
// back-end to TableGen to create these clean tables.
-namespace ARM {
-
-// FPU Version
-enum class FPUVersion {
- NONE,
- VFPV2,
- VFPV3,
- VFPV3_FP16,
- VFPV4,
- VFPV5
-};
-
-// An FPU name restricts the FPU in one of three ways:
-enum class FPURestriction {
- None = 0, ///< No restriction
- D16, ///< Only 16 D registers
- SP_D16 ///< Only single-precision instructions, with 16 D registers
-};
-
-// An FPU name implies one of three levels of Neon support:
-enum class NeonSupportLevel {
- None = 0, ///< No Neon
- Neon, ///< Neon
- Crypto ///< Neon with Crypto
-};
-
-// FPU names.
-enum FPUKind {
-#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) KIND,
-#include "ARMTargetParser.def"
- FK_LAST
-};
-
-// Arch names.
-enum class ArchKind {
-#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
-#include "ARMTargetParser.def"
-};
-
-// Arch extension modifiers for CPUs.
-enum ArchExtKind : unsigned {
- AEK_INVALID = 0,
- AEK_NONE = 1,
- AEK_CRC = 1 << 1,
- AEK_CRYPTO = 1 << 2,
- AEK_FP = 1 << 3,
- AEK_HWDIVTHUMB = 1 << 4,
- AEK_HWDIVARM = 1 << 5,
- AEK_MP = 1 << 6,
- AEK_SIMD = 1 << 7,
- AEK_SEC = 1 << 8,
- AEK_VIRT = 1 << 9,
- AEK_DSP = 1 << 10,
- AEK_FP16 = 1 << 11,
- AEK_RAS = 1 << 12,
- AEK_SVE = 1 << 13,
- AEK_DOTPROD = 1 << 14,
- AEK_SHA2 = 1 << 15,
- AEK_AES = 1 << 16,
- // Unsupported extensions.
- AEK_OS = 0x8000000,
- AEK_IWMMXT = 0x10000000,
- AEK_IWMMXT2 = 0x20000000,
- AEK_MAVERICK = 0x40000000,
- AEK_XSCALE = 0x80000000,
-};
-
-// ISA kinds.
-enum class ISAKind { INVALID = 0, ARM, THUMB, AARCH64 };
-
-// Endianness
-// FIXME: BE8 vs. BE32?
-enum class EndianKind { INVALID = 0, LITTLE, BIG };
-
-// v6/v7/v8 Profile
-enum class ProfileKind { INVALID = 0, A, R, M };
-
-StringRef getCanonicalArchName(StringRef Arch);
-
-// Information by ID
-StringRef getFPUName(unsigned FPUKind);
-FPUVersion getFPUVersion(unsigned FPUKind);
-NeonSupportLevel getFPUNeonSupportLevel(unsigned FPUKind);
-FPURestriction getFPURestriction(unsigned FPUKind);
-
-// FIXME: These should be moved to TargetTuple once it exists
-bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
-bool getHWDivFeatures(unsigned HWDivKind, std::vector<StringRef> &Features);
-bool getExtensionFeatures(unsigned Extensions,
- std::vector<StringRef> &Features);
-
-StringRef getArchName(ArchKind AK);
-unsigned getArchAttr(ArchKind AK);
-StringRef getCPUAttr(ArchKind AK);
-StringRef getSubArch(ArchKind AK);
-StringRef getArchExtName(unsigned ArchExtKind);
-StringRef getArchExtFeature(StringRef ArchExt);
-StringRef getHWDivName(unsigned HWDivKind);
-
-// Information by Name
-unsigned getDefaultFPU(StringRef CPU, ArchKind AK);
-unsigned getDefaultExtensions(StringRef CPU, ArchKind AK);
-StringRef getDefaultCPU(StringRef Arch);
-
-// Parser
-unsigned parseHWDiv(StringRef HWDiv);
-unsigned parseFPU(StringRef FPU);
-ArchKind parseArch(StringRef Arch);
-unsigned parseArchExt(StringRef ArchExt);
-ArchKind parseCPUArch(StringRef CPU);
-void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
-ISAKind parseArchISA(StringRef Arch);
-EndianKind parseArchEndian(StringRef Arch);
-ProfileKind parseArchProfile(StringRef Arch);
-unsigned parseArchVersion(StringRef Arch);
-
-StringRef computeDefaultTargetABI(const Triple &TT, StringRef CPU);
-
-} // namespace ARM
-
-// FIXME:This should be made into class design,to avoid dupplication.
-namespace AArch64 {
-
-// Arch names.
-enum class ArchKind {
-#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
-#include "AArch64TargetParser.def"
-};
-
-// Arch extension modifiers for CPUs.
-enum ArchExtKind : unsigned {
- AEK_INVALID = 0,
- AEK_NONE = 1,
- AEK_CRC = 1 << 1,
- AEK_CRYPTO = 1 << 2,
- AEK_FP = 1 << 3,
- AEK_SIMD = 1 << 4,
- AEK_FP16 = 1 << 5,
- AEK_PROFILE = 1 << 6,
- AEK_RAS = 1 << 7,
- AEK_LSE = 1 << 8,
- AEK_SVE = 1 << 9,
- AEK_DOTPROD = 1 << 10,
- AEK_RCPC = 1 << 11,
- AEK_RDM = 1 << 12,
- AEK_SM4 = 1 << 13,
- AEK_SHA3 = 1 << 14,
- AEK_SHA2 = 1 << 15,
- AEK_AES = 1 << 16,
-};
-
-StringRef getCanonicalArchName(StringRef Arch);
-
-// Information by ID
-StringRef getFPUName(unsigned FPUKind);
-ARM::FPUVersion getFPUVersion(unsigned FPUKind);
-ARM::NeonSupportLevel getFPUNeonSupportLevel(unsigned FPUKind);
-ARM::FPURestriction getFPURestriction(unsigned FPUKind);
-
-// FIXME: These should be moved to TargetTuple once it exists
-bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
-bool getExtensionFeatures(unsigned Extensions,
- std::vector<StringRef> &Features);
-bool getArchFeatures(ArchKind AK, std::vector<StringRef> &Features);
-
-StringRef getArchName(ArchKind AK);
-unsigned getArchAttr(ArchKind AK);
-StringRef getCPUAttr(ArchKind AK);
-StringRef getSubArch(ArchKind AK);
-StringRef getArchExtName(unsigned ArchExtKind);
-StringRef getArchExtFeature(StringRef ArchExt);
-unsigned checkArchVersion(StringRef Arch);
-
-// Information by Name
-unsigned getDefaultFPU(StringRef CPU, ArchKind AK);
-unsigned getDefaultExtensions(StringRef CPU, ArchKind AK);
-StringRef getDefaultCPU(StringRef Arch);
-AArch64::ArchKind getCPUArchKind(StringRef CPU);
-
-// Parser
-unsigned parseFPU(StringRef FPU);
-AArch64::ArchKind parseArch(StringRef Arch);
-ArchExtKind parseArchExt(StringRef ArchExt);
-ArchKind parseCPUArch(StringRef CPU);
-void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
-ARM::ISAKind parseArchISA(StringRef Arch);
-ARM::EndianKind parseArchEndian(StringRef Arch);
-ARM::ProfileKind parseArchProfile(StringRef Arch);
-unsigned parseArchVersion(StringRef Arch);
-
-bool isX18ReservedByDefault(const Triple &TT);
-
-} // namespace AArch64
-
namespace X86 {
// This should be kept in sync with libcc/compiler-rt as its included by clang
@@ -266,6 +75,96 @@ enum ProcessorFeatures {
} // namespace X86
+namespace AMDGPU {
+
+/// GPU kinds supported by the AMDGPU target.
+enum GPUKind : uint32_t {
+ // Not specified processor.
+ GK_NONE = 0,
+
+ // R600-based processors.
+ GK_R600 = 1,
+ GK_R630 = 2,
+ GK_RS880 = 3,
+ GK_RV670 = 4,
+ GK_RV710 = 5,
+ GK_RV730 = 6,
+ GK_RV770 = 7,
+ GK_CEDAR = 8,
+ GK_CYPRESS = 9,
+ GK_JUNIPER = 10,
+ GK_REDWOOD = 11,
+ GK_SUMO = 12,
+ GK_BARTS = 13,
+ GK_CAICOS = 14,
+ GK_CAYMAN = 15,
+ GK_TURKS = 16,
+
+ GK_R600_FIRST = GK_R600,
+ GK_R600_LAST = GK_TURKS,
+
+ // AMDGCN-based processors.
+ GK_GFX600 = 32,
+ GK_GFX601 = 33,
+
+ GK_GFX700 = 40,
+ GK_GFX701 = 41,
+ GK_GFX702 = 42,
+ GK_GFX703 = 43,
+ GK_GFX704 = 44,
+
+ GK_GFX801 = 50,
+ GK_GFX802 = 51,
+ GK_GFX803 = 52,
+ GK_GFX810 = 53,
+
+ GK_GFX900 = 60,
+ GK_GFX902 = 61,
+ GK_GFX904 = 62,
+ GK_GFX906 = 63,
+ GK_GFX909 = 65,
+
+ GK_AMDGCN_FIRST = GK_GFX600,
+ GK_AMDGCN_LAST = GK_GFX909,
+};
+
+/// Instruction set architecture version.
+struct IsaVersion {
+ unsigned Major;
+ unsigned Minor;
+ unsigned Stepping;
+};
+
+// This isn't comprehensive for now, just things that are needed from the
+// frontend driver.
+enum ArchFeatureKind : uint32_t {
+ FEATURE_NONE = 0,
+
+ // These features only exist for r600, and are implied true for amdgcn.
+ FEATURE_FMA = 1 << 1,
+ FEATURE_LDEXP = 1 << 2,
+ FEATURE_FP64 = 1 << 3,
+
+ // Common features.
+ FEATURE_FAST_FMA_F32 = 1 << 4,
+ FEATURE_FAST_DENORMAL_F32 = 1 << 5
+};
+
+StringRef getArchNameAMDGCN(GPUKind AK);
+StringRef getArchNameR600(GPUKind AK);
+StringRef getCanonicalArchName(StringRef Arch);
+GPUKind parseArchAMDGCN(StringRef CPU);
+GPUKind parseArchR600(StringRef CPU);
+unsigned getArchAttrAMDGCN(GPUKind AK);
+unsigned getArchAttrR600(GPUKind AK);
+
+void fillValidArchListAMDGCN(SmallVectorImpl<StringRef> &Values);
+void fillValidArchListR600(SmallVectorImpl<StringRef> &Values);
+
+IsaVersion getIsaVersion(StringRef GPU);
+
+} // namespace AMDGPU
+
} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/Support/Threading.h b/contrib/llvm/include/llvm/Support/Threading.h
index e8021f648b0d..ba7ece5e72ba 100644
--- a/contrib/llvm/include/llvm/Support/Threading.h
+++ b/contrib/llvm/include/llvm/Support/Threading.h
@@ -27,7 +27,8 @@
#define LLVM_THREADING_USE_STD_CALL_ONCE 1
#elif defined(LLVM_ON_UNIX) && \
(defined(_LIBCPP_VERSION) || \
- !(defined(__NetBSD__) || defined(__OpenBSD__) || defined(__ppc__)))
+ !(defined(__NetBSD__) || defined(__OpenBSD__) || \
+ (defined(__ppc__) || defined(__PPC__))))
// std::call_once from libc++ is used on all Unix platforms. Other
// implementations like libstdc++ are known to have problems on NetBSD,
// OpenBSD and PowerPC.
diff --git a/contrib/llvm/include/llvm/Support/Timer.h b/contrib/llvm/include/llvm/Support/Timer.h
index bfffbc3157b1..a11c3ce3ff22 100644
--- a/contrib/llvm/include/llvm/Support/Timer.h
+++ b/contrib/llvm/include/llvm/Support/Timer.h
@@ -206,15 +206,23 @@ public:
Description.assign(NewDescription.begin(), NewDescription.end());
}
- /// Print any started timers in this group and zero them.
+ /// Print any started timers in this group.
void print(raw_ostream &OS);
- /// This static method prints all timers and clears them all out.
+ /// Clear all timers in this group.
+ void clear();
+
+ /// This static method prints all timers.
static void printAll(raw_ostream &OS);
+ /// Clear out all timers. This is mostly used to disable automatic
+ /// printing on shutdown, when timers have already been printed explicitly
+ /// using \c printAll or \c printJSONValues.
+ static void clearAll();
+
const char *printJSONValues(raw_ostream &OS, const char *delim);
- /// Prints all timers as JSON key/value pairs, and clears them all out.
+ /// Prints all timers as JSON key/value pairs.
static const char *printAllJSONValues(raw_ostream &OS, const char *delim);
/// Ensure global timer group lists are initialized. This function is mostly
diff --git a/contrib/llvm/include/llvm/Support/VirtualFileSystem.h b/contrib/llvm/include/llvm/Support/VirtualFileSystem.h
new file mode 100644
index 000000000000..61c3d2f46e9c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -0,0 +1,764 @@
+//===- VirtualFileSystem.h - Virtual File System Layer ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Defines the virtual file system interface vfs::FileSystem.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_VIRTUALFILESYSTEM_H
+#define LLVM_SUPPORT_VIRTUALFILESYSTEM_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cassert>
+#include <cstdint>
+#include <ctime>
+#include <memory>
+#include <stack>
+#include <string>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class MemoryBuffer;
+
+namespace vfs {
+
+/// The result of a \p status operation.
+class Status {
+ std::string Name;
+ llvm::sys::fs::UniqueID UID;
+ llvm::sys::TimePoint<> MTime;
+ uint32_t User;
+ uint32_t Group;
+ uint64_t Size;
+ llvm::sys::fs::file_type Type = llvm::sys::fs::file_type::status_error;
+ llvm::sys::fs::perms Perms;
+
+public:
+ // FIXME: remove when files support multiple names
+ bool IsVFSMapped = false;
+
+ Status() = default;
+ Status(const llvm::sys::fs::file_status &Status);
+ Status(StringRef Name, llvm::sys::fs::UniqueID UID,
+ llvm::sys::TimePoint<> MTime, uint32_t User, uint32_t Group,
+ uint64_t Size, llvm::sys::fs::file_type Type,
+ llvm::sys::fs::perms Perms);
+
+ /// Get a copy of a Status with a different name.
+ static Status copyWithNewName(const Status &In, StringRef NewName);
+ static Status copyWithNewName(const llvm::sys::fs::file_status &In,
+ StringRef NewName);
+
+ /// Returns the name that should be used for this file or directory.
+ StringRef getName() const { return Name; }
+
+ /// @name Status interface from llvm::sys::fs
+ /// @{
+ llvm::sys::fs::file_type getType() const { return Type; }
+ llvm::sys::fs::perms getPermissions() const { return Perms; }
+ llvm::sys::TimePoint<> getLastModificationTime() const { return MTime; }
+ llvm::sys::fs::UniqueID getUniqueID() const { return UID; }
+ uint32_t getUser() const { return User; }
+ uint32_t getGroup() const { return Group; }
+ uint64_t getSize() const { return Size; }
+ /// @}
+ /// @name Status queries
+ /// These are static queries in llvm::sys::fs.
+ /// @{
+ bool equivalent(const Status &Other) const;
+ bool isDirectory() const;
+ bool isRegularFile() const;
+ bool isOther() const;
+ bool isSymlink() const;
+ bool isStatusKnown() const;
+ bool exists() const;
+ /// @}
+};
+
+/// Represents an open file.
+class File {
+public:
+ /// Destroy the file after closing it (if open).
+ /// Sub-classes should generally call close() inside their destructors. We
+ /// cannot do that from the base class, since close is virtual.
+ virtual ~File();
+
+ /// Get the status of the file.
+ virtual llvm::ErrorOr<Status> status() = 0;
+
+ /// Get the name of the file
+ virtual llvm::ErrorOr<std::string> getName() {
+ if (auto Status = status())
+ return Status->getName().str();
+ else
+ return Status.getError();
+ }
+
+ /// Get the contents of the file as a \p MemoryBuffer.
+ virtual llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize = -1,
+ bool RequiresNullTerminator = true, bool IsVolatile = false) = 0;
+
+ /// Closes the file.
+ virtual std::error_code close() = 0;
+};
+
+/// A member of a directory, yielded by a directory_iterator.
+/// Only information available on most platforms is included.
+class directory_entry {
+ std::string Path;
+ llvm::sys::fs::file_type Type;
+
+public:
+ directory_entry() = default;
+ directory_entry(std::string Path, llvm::sys::fs::file_type Type)
+ : Path(std::move(Path)), Type(Type) {}
+
+ llvm::StringRef path() const { return Path; }
+ llvm::sys::fs::file_type type() const { return Type; }
+};
+
+namespace detail {
+
+/// An interface for virtual file systems to provide an iterator over the
+/// (non-recursive) contents of a directory.
+struct DirIterImpl {
+ virtual ~DirIterImpl();
+
+ /// Sets \c CurrentEntry to the next entry in the directory on success,
+ /// to directory_entry() at end, or returns a system-defined \c error_code.
+ virtual std::error_code increment() = 0;
+
+ directory_entry CurrentEntry;
+};
+
+} // namespace detail
+
+/// An input iterator over the entries in a virtual path, similar to
+/// llvm::sys::fs::directory_iterator.
+class directory_iterator {
+ std::shared_ptr<detail::DirIterImpl> Impl; // Input iterator semantics on copy
+
+public:
+ directory_iterator(std::shared_ptr<detail::DirIterImpl> I)
+ : Impl(std::move(I)) {
+ assert(Impl.get() != nullptr && "requires non-null implementation");
+ if (Impl->CurrentEntry.path().empty())
+ Impl.reset(); // Normalize the end iterator to Impl == nullptr.
+ }
+
+ /// Construct an 'end' iterator.
+ directory_iterator() = default;
+
+ /// Equivalent to operator++, with an error code.
+ directory_iterator &increment(std::error_code &EC) {
+ assert(Impl && "attempting to increment past end");
+ EC = Impl->increment();
+ if (Impl->CurrentEntry.path().empty())
+ Impl.reset(); // Normalize the end iterator to Impl == nullptr.
+ return *this;
+ }
+
+ const directory_entry &operator*() const { return Impl->CurrentEntry; }
+ const directory_entry *operator->() const { return &Impl->CurrentEntry; }
+
+ bool operator==(const directory_iterator &RHS) const {
+ if (Impl && RHS.Impl)
+ return Impl->CurrentEntry.path() == RHS.Impl->CurrentEntry.path();
+ return !Impl && !RHS.Impl;
+ }
+ bool operator!=(const directory_iterator &RHS) const {
+ return !(*this == RHS);
+ }
+};
+
+class FileSystem;
+
+namespace detail {
+
+/// Keeps state for the recursive_directory_iterator.
+struct RecDirIterState {
+ std::stack<directory_iterator, std::vector<directory_iterator>> Stack;
+ bool HasNoPushRequest = false;
+};
+
+} // end namespace detail
+
+/// An input iterator over the recursive contents of a virtual path,
+/// similar to llvm::sys::fs::recursive_directory_iterator.
+class recursive_directory_iterator {
+ FileSystem *FS;
+ std::shared_ptr<detail::RecDirIterState>
+ State; // Input iterator semantics on copy.
+
+public:
+ recursive_directory_iterator(FileSystem &FS, const Twine &Path,
+ std::error_code &EC);
+
+ /// Construct an 'end' iterator.
+ recursive_directory_iterator() = default;
+
+ /// Equivalent to operator++, with an error code.
+ recursive_directory_iterator &increment(std::error_code &EC);
+
+ const directory_entry &operator*() const { return *State->Stack.top(); }
+ const directory_entry *operator->() const { return &*State->Stack.top(); }
+
+ bool operator==(const recursive_directory_iterator &Other) const {
+ return State == Other.State; // identity
+ }
+ bool operator!=(const recursive_directory_iterator &RHS) const {
+ return !(*this == RHS);
+ }
+
+ /// Gets the current level. Starting path is at level 0.
+ int level() const {
+ assert(!State->Stack.empty() &&
+ "Cannot get level without any iteration state");
+ return State->Stack.size() - 1;
+ }
+
+ void no_push() { State->HasNoPushRequest = true; }
+};
+
+/// The virtual file system interface.
+class FileSystem : public llvm::ThreadSafeRefCountedBase<FileSystem> {
+public:
+ virtual ~FileSystem();
+
+ /// Get the status of the entry at \p Path, if one exists.
+ virtual llvm::ErrorOr<Status> status(const Twine &Path) = 0;
+
+ /// Get a \p File object for the file at \p Path, if one exists.
+ virtual llvm::ErrorOr<std::unique_ptr<File>>
+ openFileForRead(const Twine &Path) = 0;
+
+ /// This is a convenience method that opens a file, gets its content and then
+ /// closes the file.
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBufferForFile(const Twine &Name, int64_t FileSize = -1,
+ bool RequiresNullTerminator = true, bool IsVolatile = false);
+
+ /// Get a directory_iterator for \p Dir.
+ /// \note The 'end' iterator is directory_iterator().
+ virtual directory_iterator dir_begin(const Twine &Dir,
+ std::error_code &EC) = 0;
+
+ /// Set the working directory. This will affect all following operations on
+ /// this file system and may propagate down for nested file systems.
+ virtual std::error_code setCurrentWorkingDirectory(const Twine &Path) = 0;
+
+ /// Get the working directory of this file system.
+ virtual llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const = 0;
+
+ /// Gets real path of \p Path e.g. collapse all . and .. patterns, resolve
+ /// symlinks. For real file system, this uses `llvm::sys::fs::real_path`.
+ /// This returns errc::operation_not_permitted if not implemented by subclass.
+ virtual std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const;
+
+ /// Check whether a file exists. Provided for convenience.
+ bool exists(const Twine &Path);
+
+ /// Is the file mounted on a local filesystem?
+ virtual std::error_code isLocal(const Twine &Path, bool &Result);
+
+ /// Make \a Path an absolute path.
+ ///
+ /// Makes \a Path absolute using the current directory if it is not already.
+ /// An empty \a Path will result in the current directory.
+ ///
+ /// /absolute/path => /absolute/path
+ /// relative/../path => <current-directory>/relative/../path
+ ///
+ /// \param Path A path that is modified to be an absolute path.
+ /// \returns success if \a path has been made absolute, otherwise a
+ /// platform-specific error_code.
+ std::error_code makeAbsolute(SmallVectorImpl<char> &Path) const;
+};
+
+/// Gets an \p vfs::FileSystem for the 'real' file system, as seen by
+/// the operating system.
+IntrusiveRefCntPtr<FileSystem> getRealFileSystem();
+
+/// A file system that allows overlaying one \p AbstractFileSystem on top
+/// of another.
+///
+/// Consists of a stack of >=1 \p FileSystem objects, which are treated as being
+/// one merged file system. When there is a directory that exists in more than
+/// one file system, the \p OverlayFileSystem contains a directory containing
+/// the union of their contents. The attributes (permissions, etc.) of the
+/// top-most (most recently added) directory are used. When there is a file
+/// that exists in more than one file system, the file in the top-most file
+/// system overrides the other(s).
+class OverlayFileSystem : public FileSystem {
+ using FileSystemList = SmallVector<IntrusiveRefCntPtr<FileSystem>, 1>;
+
+ /// The stack of file systems, implemented as a list in order of
+ /// their addition.
+ FileSystemList FSList;
+
+public:
+ OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> Base);
+
+ /// Pushes a file system on top of the stack.
+ void pushOverlay(IntrusiveRefCntPtr<FileSystem> FS);
+
+ llvm::ErrorOr<Status> status(const Twine &Path) override;
+ llvm::ErrorOr<std::unique_ptr<File>>
+ openFileForRead(const Twine &Path) override;
+ directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+ std::error_code isLocal(const Twine &Path, bool &Result) override;
+ std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const override;
+
+ using iterator = FileSystemList::reverse_iterator;
+ using const_iterator = FileSystemList::const_reverse_iterator;
+
+ /// Get an iterator pointing to the most recently added file system.
+ iterator overlays_begin() { return FSList.rbegin(); }
+ const_iterator overlays_begin() const { return FSList.rbegin(); }
+
+ /// Get an iterator pointing one-past the least recently added file
+ /// system.
+ iterator overlays_end() { return FSList.rend(); }
+ const_iterator overlays_end() const { return FSList.rend(); }
+};
+
+/// By default, this delegates all calls to the underlying file system. This
+/// is useful when derived file systems want to override some calls and still
+/// proxy other calls.
+class ProxyFileSystem : public FileSystem {
+public:
+ explicit ProxyFileSystem(IntrusiveRefCntPtr<FileSystem> FS)
+ : FS(std::move(FS)) {}
+
+ llvm::ErrorOr<Status> status(const Twine &Path) override {
+ return FS->status(Path);
+ }
+ llvm::ErrorOr<std::unique_ptr<File>>
+ openFileForRead(const Twine &Path) override {
+ return FS->openFileForRead(Path);
+ }
+ directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override {
+ return FS->dir_begin(Dir, EC);
+ }
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
+ return FS->getCurrentWorkingDirectory();
+ }
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
+ return FS->setCurrentWorkingDirectory(Path);
+ }
+ std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const override {
+ return FS->getRealPath(Path, Output);
+ }
+ std::error_code isLocal(const Twine &Path, bool &Result) override {
+ return FS->isLocal(Path, Result);
+ }
+
+protected:
+ FileSystem &getUnderlyingFS() { return *FS; }
+
+private:
+ IntrusiveRefCntPtr<FileSystem> FS;
+
+ virtual void anchor();
+};
+
+namespace detail {
+
+class InMemoryDirectory;
+class InMemoryFile;
+
+} // namespace detail
+
+/// An in-memory file system.
+class InMemoryFileSystem : public FileSystem {
+ std::unique_ptr<detail::InMemoryDirectory> Root;
+ std::string WorkingDirectory;
+ bool UseNormalizedPaths = true;
+
+ /// If HardLinkTarget is non-null, a hardlink is created to the To path which
+ /// must be a file. If it is null then it adds the file as the public addFile.
+ bool addFile(const Twine &Path, time_t ModificationTime,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ Optional<uint32_t> User, Optional<uint32_t> Group,
+ Optional<llvm::sys::fs::file_type> Type,
+ Optional<llvm::sys::fs::perms> Perms,
+ const detail::InMemoryFile *HardLinkTarget);
+
+public:
+ explicit InMemoryFileSystem(bool UseNormalizedPaths = true);
+ ~InMemoryFileSystem() override;
+
+ /// Add a file containing a buffer or a directory to the VFS with a
+ /// path. The VFS owns the buffer. If present, User, Group, Type
+ /// and Perms apply to the newly-created file or directory.
+ /// \return true if the file or directory was successfully added,
+ /// false if the file or directory already exists in the file system with
+ /// different contents.
+ bool addFile(const Twine &Path, time_t ModificationTime,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ Optional<uint32_t> User = None, Optional<uint32_t> Group = None,
+ Optional<llvm::sys::fs::file_type> Type = None,
+ Optional<llvm::sys::fs::perms> Perms = None);
+
+ /// Add a hard link to a file.
+ /// Here hard links are not intended to be fully equivalent to the classical
+ /// filesystem. Both the hard link and the file share the same buffer and
+ /// status (and thus have the same UniqueID). Because of this there is no way
+ /// to distinguish between the link and the file after the link has been
+ /// added.
+ ///
+ /// The To path must be an existing file or a hardlink. The From file must not
+ /// have been added before. The To Path must not be a directory. The From Node
+ /// is added as a hard link which points to the resolved file of To Node.
+ /// \return true if the above condition is satisfied and hardlink was
+ /// successfully created, false otherwise.
+ bool addHardLink(const Twine &From, const Twine &To);
+
+ /// Add a buffer to the VFS with a path. The VFS does not own the buffer.
+ /// If present, User, Group, Type and Perms apply to the newly-created file
+ /// or directory.
+ /// \return true if the file or directory was successfully added,
+ /// false if the file or directory already exists in the file system with
+ /// different contents.
+ bool addFileNoOwn(const Twine &Path, time_t ModificationTime,
+ llvm::MemoryBuffer *Buffer, Optional<uint32_t> User = None,
+ Optional<uint32_t> Group = None,
+ Optional<llvm::sys::fs::file_type> Type = None,
+ Optional<llvm::sys::fs::perms> Perms = None);
+
+ std::string toString() const;
+
+ /// Return true if this file system normalizes . and .. in paths.
+ bool useNormalizedPaths() const { return UseNormalizedPaths; }
+
+ llvm::ErrorOr<Status> status(const Twine &Path) override;
+ llvm::ErrorOr<std::unique_ptr<File>>
+ openFileForRead(const Twine &Path) override;
+ directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
+ return WorkingDirectory;
+ }
+ /// Canonicalizes \p Path by combining with the current working
+ /// directory and normalizing the path (e.g. remove dots). If the current
+ /// working directory is not set, this returns errc::operation_not_permitted.
+ ///
+ /// This doesn't resolve symlinks as they are not supported in in-memory file
+ /// system.
+ std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const override;
+ std::error_code isLocal(const Twine &Path, bool &Result) override;
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+};
+
+/// Get a globally unique ID for a virtual file or directory.
+llvm::sys::fs::UniqueID getNextVirtualUniqueID();
+
+/// Gets a \p FileSystem for a virtual file system described in YAML
+/// format.
+IntrusiveRefCntPtr<FileSystem>
+getVFSFromYAML(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ StringRef YAMLFilePath, void *DiagContext = nullptr,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS = getRealFileSystem());
+
+struct YAMLVFSEntry {
+ template <typename T1, typename T2>
+ YAMLVFSEntry(T1 &&VPath, T2 &&RPath)
+ : VPath(std::forward<T1>(VPath)), RPath(std::forward<T2>(RPath)) {}
+ std::string VPath;
+ std::string RPath;
+};
+
+class VFSFromYamlDirIterImpl;
+class RedirectingFileSystemParser;
+
+/// A virtual file system parsed from a YAML file.
+///
+/// Currently, this class allows creating virtual directories and mapping
+/// virtual file paths to existing external files, available in \c ExternalFS.
+///
+/// The basic structure of the parsed file is:
+/// \verbatim
+/// {
+/// 'version': <version number>,
+/// <optional configuration>
+/// 'roots': [
+/// <directory entries>
+/// ]
+/// }
+/// \endverbatim
+///
+/// All configuration options are optional.
+/// 'case-sensitive': <boolean, default=true>
+/// 'use-external-names': <boolean, default=true>
+/// 'overlay-relative': <boolean, default=false>
+/// 'fallthrough': <boolean, default=true>
+///
+/// Virtual directories are represented as
+/// \verbatim
+/// {
+/// 'type': 'directory',
+/// 'name': <string>,
+/// 'contents': [ <file or directory entries> ]
+/// }
+/// \endverbatim
+///
+/// The default attributes for virtual directories are:
+/// \verbatim
+/// MTime = now() when created
+/// Perms = 0777
+/// User = Group = 0
+/// Size = 0
+/// UniqueID = unspecified unique value
+/// \endverbatim
+///
+/// Re-mapped files are represented as
+/// \verbatim
+/// {
+/// 'type': 'file',
+/// 'name': <string>,
+/// 'use-external-name': <boolean> # Optional
+/// 'external-contents': <path to external file>
+/// }
+/// \endverbatim
+///
+/// and inherit their attributes from the external contents.
+///
+/// In both cases, the 'name' field may contain multiple path components (e.g.
+/// /path/to/file). However, any directory that contains more than one child
+/// must be uniquely represented by a directory entry.
+class RedirectingFileSystem : public vfs::FileSystem {
+public:
+ enum EntryKind { EK_Directory, EK_File };
+
+ /// A single file or directory in the VFS.
+ class Entry {
+ EntryKind Kind;
+ std::string Name;
+
+ public:
+ Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
+ virtual ~Entry() = default;
+
+ StringRef getName() const { return Name; }
+ EntryKind getKind() const { return Kind; }
+ };
+
+ class RedirectingDirectoryEntry : public Entry {
+ std::vector<std::unique_ptr<Entry>> Contents;
+ Status S;
+
+ public:
+ RedirectingDirectoryEntry(StringRef Name,
+ std::vector<std::unique_ptr<Entry>> Contents,
+ Status S)
+ : Entry(EK_Directory, Name), Contents(std::move(Contents)),
+ S(std::move(S)) {}
+ RedirectingDirectoryEntry(StringRef Name, Status S)
+ : Entry(EK_Directory, Name), S(std::move(S)) {}
+
+ Status getStatus() { return S; }
+
+ void addContent(std::unique_ptr<Entry> Content) {
+ Contents.push_back(std::move(Content));
+ }
+
+ Entry *getLastContent() const { return Contents.back().get(); }
+
+ using iterator = decltype(Contents)::iterator;
+
+ iterator contents_begin() { return Contents.begin(); }
+ iterator contents_end() { return Contents.end(); }
+
+ static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
+ };
+
+ class RedirectingFileEntry : public Entry {
+ public:
+ enum NameKind { NK_NotSet, NK_External, NK_Virtual };
+
+ private:
+ std::string ExternalContentsPath;
+ NameKind UseName;
+
+ public:
+ RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
+ NameKind UseName)
+ : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
+ UseName(UseName) {}
+
+ StringRef getExternalContentsPath() const { return ExternalContentsPath; }
+
+ /// whether to use the external path as the name for this file.
+ bool useExternalName(bool GlobalUseExternalName) const {
+ return UseName == NK_NotSet ? GlobalUseExternalName
+ : (UseName == NK_External);
+ }
+
+ NameKind getUseName() const { return UseName; }
+
+ static bool classof(const Entry *E) { return E->getKind() == EK_File; }
+ };
+
+private:
+ friend class VFSFromYamlDirIterImpl;
+ friend class RedirectingFileSystemParser;
+
+ /// The root(s) of the virtual file system.
+ std::vector<std::unique_ptr<Entry>> Roots;
+
+ /// The file system to use for external references.
+ IntrusiveRefCntPtr<FileSystem> ExternalFS;
+
+ /// If IsRelativeOverlay is set, this represents the directory
+ /// path that should be prefixed to each 'external-contents' entry
+ /// when reading from YAML files.
+ std::string ExternalContentsPrefixDir;
+
+ /// @name Configuration
+ /// @{
+
+ /// Whether to perform case-sensitive comparisons.
+ ///
+ /// Currently, case-insensitive matching only works correctly with ASCII.
+ bool CaseSensitive = true;
+
+ /// IsRelativeOverlay marks whether a ExternalContentsPrefixDir path must
+ /// be prefixed in every 'external-contents' when reading from YAML files.
+ bool IsRelativeOverlay = false;
+
+ /// Whether to use to use the value of 'external-contents' for the
+ /// names of files. This global value is overridable on a per-file basis.
+ bool UseExternalNames = true;
+
+ /// Whether to attempt a file lookup in external file system after it wasn't
+ /// found in VFS.
+ bool IsFallthrough = true;
+ /// @}
+
+ /// Virtual file paths and external files could be canonicalized without "..",
+ /// "." and "./" in their paths. FIXME: some unittests currently fail on
+ /// win32 when using remove_dots and remove_leading_dotslash on paths.
+ bool UseCanonicalizedPaths =
+#ifdef _WIN32
+ false;
+#else
+ true;
+#endif
+
+ RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
+ : ExternalFS(std::move(ExternalFS)) {}
+
+ /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
+ /// recursing into the contents of \p From if it is a directory.
+ ErrorOr<Entry *> lookupPath(llvm::sys::path::const_iterator Start,
+ llvm::sys::path::const_iterator End,
+ Entry *From) const;
+
+ /// Get the status of a given an \c Entry.
+ ErrorOr<Status> status(const Twine &Path, Entry *E);
+
+public:
+ /// Looks up \p Path in \c Roots.
+ ErrorOr<Entry *> lookupPath(const Twine &Path) const;
+
+ /// Parses \p Buffer, which is expected to be in YAML format and
+ /// returns a virtual file system representing its contents.
+ static RedirectingFileSystem *
+ create(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
+ void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);
+
+ ErrorOr<Status> status(const Twine &Path) override;
+ ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
+
+ std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const override;
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
+
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+
+ std::error_code isLocal(const Twine &Path, bool &Result) override;
+
+ directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+
+ void setExternalContentsPrefixDir(StringRef PrefixDir);
+
+ StringRef getExternalContentsPrefixDir() const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const;
+ LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const;
+#endif
+};
+
+/// Collect all pairs of <virtual path, real path> entries from the
+/// \p YAMLFilePath. This is used by the module dependency collector to forward
+/// the entries into the reproducer output VFS YAML file.
+void collectVFSFromYAML(
+ std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
+ SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
+ void *DiagContext = nullptr,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS = getRealFileSystem());
+
+class YAMLVFSWriter {
+ std::vector<YAMLVFSEntry> Mappings;
+ Optional<bool> IsCaseSensitive;
+ Optional<bool> IsOverlayRelative;
+ Optional<bool> UseExternalNames;
+ std::string OverlayDir;
+
+public:
+ YAMLVFSWriter() = default;
+
+ void addFileMapping(StringRef VirtualPath, StringRef RealPath);
+
+ void setCaseSensitivity(bool CaseSensitive) {
+ IsCaseSensitive = CaseSensitive;
+ }
+
+ void setUseExternalNames(bool UseExtNames) { UseExternalNames = UseExtNames; }
+
+ void setOverlayDir(StringRef OverlayDirectory) {
+ IsOverlayRelative = true;
+ OverlayDir.assign(OverlayDirectory.str());
+ }
+
+ const std::vector<YAMLVFSEntry> &getMappings() const { return Mappings; }
+
+ void write(llvm::raw_ostream &OS);
+};
+
+} // namespace vfs
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_VIRTUALFILESYSTEM_H
diff --git a/contrib/llvm/include/llvm/Support/Win64EH.h b/contrib/llvm/include/llvm/Support/Win64EH.h
index 928eb906de0c..e27bf1b3a1a5 100644
--- a/contrib/llvm/include/llvm/Support/Win64EH.h
+++ b/contrib/llvm/include/llvm/Support/Win64EH.h
@@ -33,7 +33,24 @@ enum UnwindOpcodes {
UOP_SaveNonVolBig,
UOP_SaveXMM128 = 8,
UOP_SaveXMM128Big,
- UOP_PushMachFrame
+ UOP_PushMachFrame,
+ // The following set of unwind opcodes is for ARM64. They are documented at
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
+ UOP_AllocMedium,
+ UOP_SaveFPLRX,
+ UOP_SaveFPLR,
+ UOP_SaveReg,
+ UOP_SaveRegX,
+ UOP_SaveRegP,
+ UOP_SaveRegPX,
+ UOP_SaveFReg,
+ UOP_SaveFRegX,
+ UOP_SaveFRegP,
+ UOP_SaveFRegPX,
+ UOP_SetFP,
+ UOP_AddFP,
+ UOP_Nop,
+ UOP_End
};
/// UnwindCode - This union describes a single operation in a function prolog,
diff --git a/contrib/llvm/include/llvm/Support/WithColor.h b/contrib/llvm/include/llvm/Support/WithColor.h
index 85fc5fa0cf14..76842d1c3dc8 100644
--- a/contrib/llvm/include/llvm/Support/WithColor.h
+++ b/contrib/llvm/include/llvm/Support/WithColor.h
@@ -29,23 +29,49 @@ enum class HighlightColor {
Macro,
Error,
Warning,
- Note
+ Note,
+ Remark
};
/// An RAII object that temporarily switches an output stream to a specific
/// color.
class WithColor {
raw_ostream &OS;
- /// Determine whether colors should be displayed.
- bool colorsEnabled(raw_ostream &OS);
+ bool DisableColors;
public:
/// To be used like this: WithColor(OS, HighlightColor::String) << "text";
- WithColor(raw_ostream &OS, HighlightColor S);
+ /// @param OS The output stream
+ /// @param S Symbolic name for syntax element to color
+ /// @param DisableColors Whether to ignore color changes regardless of -color
+ /// and support in OS
+ WithColor(raw_ostream &OS, HighlightColor S, bool DisableColors = false);
+ /// To be used like this: WithColor(OS, raw_ostream::Black) << "text";
+ /// @param OS The output stream
+ /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
+ /// change only the bold attribute, and keep colors untouched
+ /// @param Bold Bold/brighter text, default false
+ /// @param BG If true, change the background, default: change foreground
+ /// @param DisableColors Whether to ignore color changes regardless of -color
+ /// and support in OS
+ WithColor(raw_ostream &OS,
+ raw_ostream::Colors Color = raw_ostream::SAVEDCOLOR,
+ bool Bold = false, bool BG = false, bool DisableColors = false)
+ : OS(OS), DisableColors(DisableColors) {
+ changeColor(Color, Bold, BG);
+ }
~WithColor();
raw_ostream &get() { return OS; }
operator raw_ostream &() { return OS; }
+ template <typename T> WithColor &operator<<(T &O) {
+ OS << O;
+ return *this;
+ }
+ template <typename T> WithColor &operator<<(const T &O) {
+ OS << O;
+ return *this;
+ }
/// Convenience method for printing "error: " to stderr.
static raw_ostream &error();
@@ -53,13 +79,36 @@ public:
static raw_ostream &warning();
/// Convenience method for printing "note: " to stderr.
static raw_ostream &note();
+ /// Convenience method for printing "remark: " to stderr.
+ static raw_ostream &remark();
/// Convenience method for printing "error: " to the given stream.
- static raw_ostream &error(raw_ostream &OS, StringRef Prefix = "");
+ static raw_ostream &error(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Convenience method for printing "warning: " to the given stream.
- static raw_ostream &warning(raw_ostream &OS, StringRef Prefix = "");
+ static raw_ostream &warning(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
/// Convenience method for printing "note: " to the given stream.
- static raw_ostream &note(raw_ostream &OS, StringRef Prefix = "");
+ static raw_ostream &note(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
+ /// Convenience method for printing "remark: " to the given stream.
+ static raw_ostream &remark(raw_ostream &OS, StringRef Prefix = "",
+ bool DisableColors = false);
+
+ /// Determine whether colors are displayed.
+ bool colorsEnabled();
+
+ /// Change the color of text that will be output from this point forward.
+ /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
+ /// change only the bold attribute, and keep colors untouched
+ /// @param Bold Bold/brighter text, default false
+ /// @param BG If true, change the background, default: change foreground
+ WithColor &changeColor(raw_ostream::Colors Color, bool Bold = false,
+ bool BG = false);
+
+ /// Reset the colors to terminal defaults. Call this when you are done
+ /// outputting colored text, or before program exit.
+ WithColor &resetColor();
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h b/contrib/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
index 185b357efef5..466dd309909a 100644
--- a/contrib/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
@@ -414,7 +414,7 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_R16, "2-byte") \
ENUM_ENTRY(TYPE_R32, "4-byte") \
ENUM_ENTRY(TYPE_R64, "8-byte") \
- ENUM_ENTRY(TYPE_IMM, "immediate operand") \
+ ENUM_ENTRY(TYPE_IMM, "immediate operand") \
ENUM_ENTRY(TYPE_IMM3, "1-byte immediate operand between 0 and 7") \
ENUM_ENTRY(TYPE_IMM5, "1-byte immediate operand between 0 and 31") \
ENUM_ENTRY(TYPE_AVX512ICC, "1-byte immediate operand for AVX512 icmp") \
diff --git a/contrib/llvm/include/llvm/Support/X86TargetParser.def b/contrib/llvm/include/llvm/Support/X86TargetParser.def
index e4af0657a350..e9bede545d3f 100644
--- a/contrib/llvm/include/llvm/Support/X86TargetParser.def
+++ b/contrib/llvm/include/llvm/Support/X86TargetParser.def
@@ -34,17 +34,20 @@ X86_VENDOR(VENDOR_AMD, "amd")
#ifndef X86_CPU_TYPE
#define X86_CPU_TYPE(ARCHNAME, ENUM)
#endif
-X86_CPU_TYPE_COMPAT_WITH_ALIAS("bonnell", INTEL_BONNELL, "bonnell", "atom")
-X86_CPU_TYPE_COMPAT ("core2", INTEL_CORE2, "core2")
-X86_CPU_TYPE_COMPAT ("nehalem", INTEL_COREI7, "corei7")
-X86_CPU_TYPE_COMPAT_WITH_ALIAS("amdfam10", AMDFAM10H, "amdfam10h", "amdfam10")
-X86_CPU_TYPE_COMPAT_WITH_ALIAS("bdver1", AMDFAM15H, "amdfam15h", "amdfam15")
-X86_CPU_TYPE_COMPAT_WITH_ALIAS("silvermont", INTEL_SILVERMONT, "silvermont", "slm")
-X86_CPU_TYPE_COMPAT ("knl", INTEL_KNL, "knl")
-X86_CPU_TYPE_COMPAT ("btver1", AMD_BTVER1, "btver1")
-X86_CPU_TYPE_COMPAT ("btver2", AMD_BTVER2, "btver2")
-X86_CPU_TYPE_COMPAT ("znver1", AMDFAM17H, "amdfam17h")
-X86_CPU_TYPE_COMPAT ("knm", INTEL_KNM, "knm")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("bonnell", INTEL_BONNELL, "bonnell", "atom")
+X86_CPU_TYPE_COMPAT ("core2", INTEL_CORE2, "core2")
+X86_CPU_TYPE_COMPAT ("nehalem", INTEL_COREI7, "corei7")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("amdfam10", AMDFAM10H, "amdfam10h", "amdfam10")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("bdver1", AMDFAM15H, "amdfam15h", "amdfam15")
+X86_CPU_TYPE_COMPAT_WITH_ALIAS("silvermont", INTEL_SILVERMONT, "silvermont", "slm")
+X86_CPU_TYPE_COMPAT ("knl", INTEL_KNL, "knl")
+X86_CPU_TYPE_COMPAT ("btver1", AMD_BTVER1, "btver1")
+X86_CPU_TYPE_COMPAT ("btver2", AMD_BTVER2, "btver2")
+X86_CPU_TYPE_COMPAT ("znver1", AMDFAM17H, "amdfam17h")
+X86_CPU_TYPE_COMPAT ("knm", INTEL_KNM, "knm")
+X86_CPU_TYPE_COMPAT ("goldmont", INTEL_GOLDMONT, "goldmont")
+X86_CPU_TYPE_COMPAT ("goldmont-plus", INTEL_GOLDMONT_PLUS, "goldmont-plus")
+X86_CPU_TYPE_COMPAT ("tremont", INTEL_TREMONT, "tremont")
// Entries below this are not in libgcc/compiler-rt.
X86_CPU_TYPE ("i386", INTEL_i386)
X86_CPU_TYPE ("i486", INTEL_i486)
@@ -64,9 +67,6 @@ X86_CPU_TYPE ("athlon", AMD_ATHLON)
X86_CPU_TYPE ("athlon-xp", AMD_ATHLON_XP)
X86_CPU_TYPE ("k8", AMD_K8)
X86_CPU_TYPE ("k8-sse3", AMD_K8SSE3)
-X86_CPU_TYPE ("goldmont", INTEL_GOLDMONT)
-X86_CPU_TYPE ("goldmont-plus", INTEL_GOLDMONT_PLUS)
-X86_CPU_TYPE ("tremont", INTEL_TREMONT)
#undef X86_CPU_TYPE_COMPAT_WITH_ALIAS
#undef X86_CPU_TYPE_COMPAT
#undef X86_CPU_TYPE
@@ -97,9 +97,12 @@ X86_CPU_SUBTYPE_COMPAT("broadwell", INTEL_COREI7_BROADWELL, "broadwell
X86_CPU_SUBTYPE_COMPAT("skylake", INTEL_COREI7_SKYLAKE, "skylake")
X86_CPU_SUBTYPE_COMPAT("skylake-avx512", INTEL_COREI7_SKYLAKE_AVX512, "skylake-avx512")
X86_CPU_SUBTYPE_COMPAT("cannonlake", INTEL_COREI7_CANNONLAKE, "cannonlake")
+X86_CPU_SUBTYPE_COMPAT("icelake-client", INTEL_COREI7_ICELAKE_CLIENT, "icelake-client")
+X86_CPU_SUBTYPE_COMPAT("icelake-server", INTEL_COREI7_ICELAKE_SERVER, "icelake-server")
// Entries below this are not in libgcc/compiler-rt.
X86_CPU_SUBTYPE ("core2", INTEL_CORE2_65)
X86_CPU_SUBTYPE ("penryn", INTEL_CORE2_45)
+X86_CPU_SUBTYPE ("cascadelake", INTEL_COREI7_CASCADELAKE)
X86_CPU_SUBTYPE ("k6", AMDPENTIUM_K6)
X86_CPU_SUBTYPE ("k6-2", AMDPENTIUM_K62)
X86_CPU_SUBTYPE ("k6-3", AMDPENTIUM_K63)
@@ -147,11 +150,16 @@ X86_FEATURE_COMPAT(27, FEATURE_AVX512IFMA, "avx512ifma")
X86_FEATURE_COMPAT(28, FEATURE_AVX5124VNNIW, "avx5124vnniw")
X86_FEATURE_COMPAT(29, FEATURE_AVX5124FMAPS, "avx5124fmaps")
X86_FEATURE_COMPAT(30, FEATURE_AVX512VPOPCNTDQ, "avx512vpopcntdq")
+X86_FEATURE_COMPAT(31, FEATURE_AVX512VBMI2, "avx512vbmi2")
+X86_FEATURE_COMPAT(32, FEATURE_GFNI, "gfni")
+X86_FEATURE_COMPAT(33, FEATURE_VPCLMULQDQ, "vpclmulqdq")
+X86_FEATURE_COMPAT(34, FEATURE_AVX512VNNI, "avx512vnni")
+X86_FEATURE_COMPAT(35, FEATURE_AVX512BITALG, "avx512bitalg")
// Features below here are not in libgcc/compiler-rt.
-X86_FEATURE (32, FEATURE_MOVBE)
-X86_FEATURE (33, FEATURE_ADX)
-X86_FEATURE (34, FEATURE_EM64T)
-X86_FEATURE (35, FEATURE_CLFLUSHOPT)
-X86_FEATURE (36, FEATURE_SHA)
+X86_FEATURE (64, FEATURE_MOVBE)
+X86_FEATURE (65, FEATURE_ADX)
+X86_FEATURE (66, FEATURE_EM64T)
+X86_FEATURE (67, FEATURE_CLFLUSHOPT)
+X86_FEATURE (68, FEATURE_SHA)
#undef X86_FEATURE_COMPAT
#undef X86_FEATURE
diff --git a/contrib/llvm/include/llvm/Support/YAMLTraits.h b/contrib/llvm/include/llvm/Support/YAMLTraits.h
index 4b8c4e958288..3d790e96fff7 100644
--- a/contrib/llvm/include/llvm/Support/YAMLTraits.h
+++ b/contrib/llvm/include/llvm/Support/YAMLTraits.h
@@ -27,6 +27,7 @@
#include <cctype>
#include <cstddef>
#include <cstdint>
+#include <iterator>
#include <map>
#include <memory>
#include <new>
@@ -38,6 +39,12 @@
namespace llvm {
namespace yaml {
+enum class NodeKind : uint8_t {
+ Scalar,
+ Map,
+ Sequence,
+};
+
struct EmptyContext {};
/// This class should be specialized by any type that needs to be converted
@@ -144,14 +151,14 @@ struct ScalarTraits {
// Must provide:
//
// Function to write the value as a string:
- //static void output(const T &value, void *ctxt, llvm::raw_ostream &out);
+ // static void output(const T &value, void *ctxt, llvm::raw_ostream &out);
//
// Function to convert a string to a value. Returns the empty
// StringRef on success or an error string if string is malformed:
- //static StringRef input(StringRef scalar, void *ctxt, T &value);
+ // static StringRef input(StringRef scalar, void *ctxt, T &value);
//
// Function to determine if the value should be quoted.
- //static QuotingType mustQuote(StringRef);
+ // static QuotingType mustQuote(StringRef);
};
/// This class should be specialized by type that requires custom conversion
@@ -162,7 +169,7 @@ struct ScalarTraits {
/// static void output(const MyType &Value, void*, llvm::raw_ostream &Out)
/// {
/// // stream out custom formatting
-/// Out << Val;
+/// Out << Value;
/// }
/// static StringRef input(StringRef Scalar, void*, MyType &Value) {
/// // parse scalar and set `value`
@@ -180,6 +187,47 @@ struct BlockScalarTraits {
// Function to convert a string to a value. Returns the empty
// StringRef on success or an error string if string is malformed:
// static StringRef input(StringRef Scalar, void *ctxt, T &Value);
+ //
+ // Optional:
+ // static StringRef inputTag(T &Val, std::string Tag)
+ // static void outputTag(const T &Val, raw_ostream &Out)
+};
+
+/// This class should be specialized by type that requires custom conversion
+/// to/from a YAML scalar with optional tags. For example:
+///
+/// template <>
+/// struct TaggedScalarTraits<MyType> {
+/// static void output(const MyType &Value, void*, llvm::raw_ostream
+/// &ScalarOut, llvm::raw_ostream &TagOut)
+/// {
+/// // stream out custom formatting including optional Tag
+/// Out << Value;
+/// }
+/// static StringRef input(StringRef Scalar, StringRef Tag, void*, MyType
+/// &Value) {
+/// // parse scalar and set `value`
+/// // return empty string on success, or error string
+/// return StringRef();
+/// }
+/// static QuotingType mustQuote(const MyType &Value, StringRef) {
+/// return QuotingType::Single;
+/// }
+/// };
+template <typename T> struct TaggedScalarTraits {
+ // Must provide:
+ //
+ // Function to write the value and tag as strings:
+ // static void output(const T &Value, void *ctx, llvm::raw_ostream &ScalarOut,
+ // llvm::raw_ostream &TagOut);
+ //
+ // Function to convert a string to a value. Returns the empty
+ // StringRef on success or an error string if string is malformed:
+ // static StringRef input(StringRef Scalar, StringRef Tag, void *ctxt, T
+ // &Value);
+ //
+ // Function to determine if the value should be quoted.
+ // static QuotingType mustQuote(const T &Value, StringRef Scalar);
};
/// This class should be specialized by any type that needs to be converted
@@ -233,6 +281,31 @@ struct CustomMappingTraits {
// static void output(IO &io, T &elem);
};
+/// This class should be specialized by any type that can be represented as
+/// a scalar, map, or sequence, decided dynamically. For example:
+///
+/// typedef std::unique_ptr<MyBase> MyPoly;
+///
+/// template<>
+/// struct PolymorphicTraits<MyPoly> {
+/// static NodeKind getKind(const MyPoly &poly) {
+/// return poly->getKind();
+/// }
+/// static MyScalar& getAsScalar(MyPoly &poly) {
+/// if (!poly || !isa<MyScalar>(poly))
+/// poly.reset(new MyScalar());
+/// return *cast<MyScalar>(poly.get());
+/// }
+/// // ...
+/// };
+template <typename T> struct PolymorphicTraits {
+ // Must provide:
+ // static NodeKind getKind(const T &poly);
+ // static scalar_type &getAsScalar(T &poly);
+ // static map_type &getAsMap(T &poly);
+ // static sequence_type &getAsSequence(T &poly);
+};
+
// Only used for better diagnostics of missing traits
template <typename T>
struct MissingTrait;
@@ -249,7 +322,6 @@ struct has_ScalarEnumerationTraits
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<ScalarEnumerationTraits<T>>(nullptr)) == 1);
};
@@ -266,7 +338,6 @@ struct has_ScalarBitSetTraits
template <typename U>
static double test(...);
-public:
static bool const value = (sizeof(test<ScalarBitSetTraits<T>>(nullptr)) == 1);
};
@@ -286,7 +357,6 @@ struct has_ScalarTraits
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<ScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
};
@@ -305,11 +375,28 @@ struct has_BlockScalarTraits
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<BlockScalarTraits<T>>(nullptr, nullptr)) == 1);
};
+// Test if TaggedScalarTraits<T> is defined on type T.
+template <class T> struct has_TaggedScalarTraits {
+ using Signature_input = StringRef (*)(StringRef, StringRef, void *, T &);
+ using Signature_output = void (*)(const T &, void *, raw_ostream &,
+ raw_ostream &);
+ using Signature_mustQuote = QuotingType (*)(const T &, StringRef);
+
+ template <typename U>
+ static char test(SameType<Signature_input, &U::input> *,
+ SameType<Signature_output, &U::output> *,
+ SameType<Signature_mustQuote, &U::mustQuote> *);
+
+ template <typename U> static double test(...);
+
+ static bool const value =
+ (sizeof(test<TaggedScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
+};
+
// Test if MappingContextTraits<T> is defined on type T.
template <class T, class Context> struct has_MappingTraits {
using Signature_mapping = void (*)(class IO &, T &, Context &);
@@ -320,7 +407,6 @@ template <class T, class Context> struct has_MappingTraits {
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};
@@ -334,7 +420,6 @@ template <class T> struct has_MappingTraits<T, EmptyContext> {
template <typename U> static double test(...);
-public:
static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};
@@ -348,7 +433,6 @@ template <class T, class Context> struct has_MappingValidateTraits {
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};
@@ -362,7 +446,6 @@ template <class T> struct has_MappingValidateTraits<T, EmptyContext> {
template <typename U> static double test(...);
-public:
static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};
@@ -378,7 +461,6 @@ struct has_SequenceMethodTraits
template <typename U>
static double test(...);
-public:
static bool const value = (sizeof(test<SequenceTraits<T>>(nullptr)) == 1);
};
@@ -394,7 +476,6 @@ struct has_CustomMappingTraits
template <typename U>
static double test(...);
-public:
static bool const value =
(sizeof(test<CustomMappingTraits<T>>(nullptr)) == 1);
};
@@ -424,7 +505,6 @@ struct has_FlowTraits<T, true>
template<typename C>
static char (&f(...))[2];
-public:
static bool const value = sizeof(f<Derived>(nullptr)) == 2;
};
@@ -445,50 +525,114 @@ struct has_DocumentListTraits
template <typename U>
static double test(...);
-public:
static bool const value = (sizeof(test<DocumentListTraits<T>>(nullptr))==1);
};
-inline bool isNumber(StringRef S) {
- static const char OctalChars[] = "01234567";
- if (S.startswith("0") &&
- S.drop_front().find_first_not_of(OctalChars) == StringRef::npos)
- return true;
+template <class T> struct has_PolymorphicTraits {
+ using Signature_getKind = NodeKind (*)(const T &);
- if (S.startswith("0o") &&
- S.drop_front(2).find_first_not_of(OctalChars) == StringRef::npos)
- return true;
+ template <typename U>
+ static char test(SameType<Signature_getKind, &U::getKind> *);
- static const char HexChars[] = "0123456789abcdefABCDEF";
- if (S.startswith("0x") &&
- S.drop_front(2).find_first_not_of(HexChars) == StringRef::npos)
- return true;
+ template <typename U> static double test(...);
- static const char DecChars[] = "0123456789";
- if (S.find_first_not_of(DecChars) == StringRef::npos)
- return true;
+ static bool const value = (sizeof(test<PolymorphicTraits<T>>(nullptr)) == 1);
+};
- if (S.equals(".inf") || S.equals(".Inf") || S.equals(".INF"))
- return true;
+inline bool isNumeric(StringRef S) {
+ const static auto skipDigits = [](StringRef Input) {
+ return Input.drop_front(
+ std::min(Input.find_first_not_of("0123456789"), Input.size()));
+ };
- Regex FloatMatcher("^(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)([eE][-+]?[0-9]+)?$");
- if (FloatMatcher.match(S))
+ // Make S.front() and S.drop_front().front() (if S.front() is [+-]) calls
+ // safe.
+ if (S.empty() || S.equals("+") || S.equals("-"))
+ return false;
+
+ if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
return true;
- return false;
-}
+ // Infinity and decimal numbers can be prefixed with sign.
+ StringRef Tail = (S.front() == '-' || S.front() == '+') ? S.drop_front() : S;
-inline bool isNumeric(StringRef S) {
- if ((S.front() == '-' || S.front() == '+') && isNumber(S.drop_front()))
+ // Check for infinity first, because checking for hex and oct numbers is more
+ // expensive.
+ if (Tail.equals(".inf") || Tail.equals(".Inf") || Tail.equals(".INF"))
return true;
- if (isNumber(S))
- return true;
+ // Section 10.3.2 Tag Resolution
+ // YAML 1.2 Specification prohibits Base 8 and Base 16 numbers prefixed with
+ // [-+], so S should be used instead of Tail.
+ if (S.startswith("0o"))
+ return S.size() > 2 &&
+ S.drop_front(2).find_first_not_of("01234567") == StringRef::npos;
+
+ if (S.startswith("0x"))
+ return S.size() > 2 && S.drop_front(2).find_first_not_of(
+ "0123456789abcdefABCDEF") == StringRef::npos;
+
+ // Parse float: [-+]? (\. [0-9]+ | [0-9]+ (\. [0-9]* )?) ([eE] [-+]? [0-9]+)?
+ S = Tail;
+
+ // Handle cases when the number starts with '.' and hence needs at least one
+ // digit after dot (as opposed by number which has digits before the dot), but
+ // doesn't have one.
+ if (S.startswith(".") &&
+ (S.equals(".") ||
+ (S.size() > 1 && std::strchr("0123456789", S[1]) == nullptr)))
+ return false;
+
+ if (S.startswith("E") || S.startswith("e"))
+ return false;
+
+ enum ParseState {
+ Default,
+ FoundDot,
+ FoundExponent,
+ };
+ ParseState State = Default;
- if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
+ S = skipDigits(S);
+
+ // Accept decimal integer.
+ if (S.empty())
return true;
- return false;
+ if (S.front() == '.') {
+ State = FoundDot;
+ S = S.drop_front();
+ } else if (S.front() == 'e' || S.front() == 'E') {
+ State = FoundExponent;
+ S = S.drop_front();
+ } else {
+ return false;
+ }
+
+ if (State == FoundDot) {
+ S = skipDigits(S);
+ if (S.empty())
+ return true;
+
+ if (S.front() == 'e' || S.front() == 'E') {
+ State = FoundExponent;
+ S = S.drop_front();
+ } else {
+ return false;
+ }
+ }
+
+ assert(State == FoundExponent && "Should have found exponent at this point.");
+ if (S.empty())
+ return false;
+
+ if (S.front() == '+' || S.front() == '-') {
+ S = S.drop_front();
+ if (S.empty())
+ return false;
+ }
+
+ return skipDigits(S).empty();
}
inline bool isNull(StringRef S) {
@@ -535,7 +679,6 @@ inline QuotingType needsQuotes(StringRef S) {
// Safe scalar characters.
case '_':
case '-':
- case '/':
case '^':
case '.':
case ',':
@@ -552,6 +695,12 @@ inline QuotingType needsQuotes(StringRef S) {
// DEL (0x7F) are excluded from the allowed character range.
case 0x7F:
return QuotingType::Double;
+ // Forward slash is allowed to be unquoted, but we quote it anyway. We have
+ // many tests that use FileCheck against YAML output, and this output often
+ // contains paths. If we quote backslashes but not forward slashes then
+ // paths will come out either quoted or unquoted depending on which platform
+ // the test is run on, making FileCheck comparisons difficult.
+ case '/':
default: {
// C0 control block (0x0 - 0x1F) is excluded from the allowed character
// range.
@@ -578,10 +727,12 @@ struct missingTraits
!has_ScalarBitSetTraits<T>::value &&
!has_ScalarTraits<T>::value &&
!has_BlockScalarTraits<T>::value &&
+ !has_TaggedScalarTraits<T>::value &&
!has_MappingTraits<T, Context>::value &&
!has_SequenceTraits<T>::value &&
!has_CustomMappingTraits<T>::value &&
- !has_DocumentListTraits<T>::value> {};
+ !has_DocumentListTraits<T>::value &&
+ !has_PolymorphicTraits<T>::value> {};
template <typename T, typename Context>
struct validatedMappingTraits
@@ -635,6 +786,9 @@ public:
virtual void scalarString(StringRef &, QuotingType) = 0;
virtual void blockScalarString(StringRef &) = 0;
+ virtual void scalarTag(std::string &) = 0;
+
+ virtual NodeKind getNodeKind() = 0;
virtual void setError(const Twine &) = 0;
@@ -869,6 +1023,31 @@ yamlize(IO &YamlIO, T &Val, bool, EmptyContext &Ctx) {
}
}
+template <typename T>
+typename std::enable_if<has_TaggedScalarTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+ if (io.outputting()) {
+ std::string ScalarStorage, TagStorage;
+ raw_string_ostream ScalarBuffer(ScalarStorage), TagBuffer(TagStorage);
+ TaggedScalarTraits<T>::output(Val, io.getContext(), ScalarBuffer,
+ TagBuffer);
+ io.scalarTag(TagBuffer.str());
+ StringRef ScalarStr = ScalarBuffer.str();
+ io.scalarString(ScalarStr,
+ TaggedScalarTraits<T>::mustQuote(Val, ScalarStr));
+ } else {
+ std::string Tag;
+ io.scalarTag(Tag);
+ StringRef Str;
+ io.scalarString(Str, QuotingType::None);
+ StringRef Result =
+ TaggedScalarTraits<T>::input(Str, Tag, io.getContext(), Val);
+ if (!Result.empty()) {
+ io.setError(Twine(Result));
+ }
+ }
+}
+
template <typename T, typename Context>
typename std::enable_if<validatedMappingTraits<T, Context>::value, void>::type
yamlize(IO &io, T &Val, bool, Context &Ctx) {
@@ -925,6 +1104,20 @@ yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
}
template <typename T>
+typename std::enable_if<has_PolymorphicTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
+ switch (io.outputting() ? PolymorphicTraits<T>::getKind(Val)
+ : io.getNodeKind()) {
+ case NodeKind::Scalar:
+ return yamlize(io, PolymorphicTraits<T>::getAsScalar(Val), true, Ctx);
+ case NodeKind::Map:
+ return yamlize(io, PolymorphicTraits<T>::getAsMap(Val), true, Ctx);
+ case NodeKind::Sequence:
+ return yamlize(io, PolymorphicTraits<T>::getAsSequence(Val), true, Ctx);
+ }
+}
+
+template <typename T>
typename std::enable_if<missingTraits<T, EmptyContext>::value, void>::type
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
@@ -1202,6 +1395,8 @@ private:
void endBitSetScalar() override;
void scalarString(StringRef &, QuotingType) override;
void blockScalarString(StringRef &) override;
+ void scalarTag(std::string &) override;
+ NodeKind getNodeKind() override;
void setError(const Twine &message) override;
bool canElideEmptySequence() override;
@@ -1347,6 +1542,8 @@ public:
void endBitSetScalar() override;
void scalarString(StringRef &, QuotingType) override;
void blockScalarString(StringRef &) override;
+ void scalarTag(std::string &) override;
+ NodeKind getNodeKind() override;
void setError(const Twine &message) override;
bool canElideEmptySequence() override;
@@ -1366,14 +1563,21 @@ private:
void flowKey(StringRef Key);
enum InState {
- inSeq,
- inFlowSeq,
+ inSeqFirstElement,
+ inSeqOtherElement,
+ inFlowSeqFirstElement,
+ inFlowSeqOtherElement,
inMapFirstKey,
inMapOtherKey,
inFlowMapFirstKey,
inFlowMapOtherKey
};
+ static bool inSeqAnyElement(InState State);
+ static bool inFlowSeqAnyElement(InState State);
+ static bool inMapAnyKey(InState State);
+ static bool inFlowMapAnyKey(InState State);
+
raw_ostream &Out;
int WrapColumn;
SmallVector<InState, 8> StateStack;
@@ -1509,6 +1713,16 @@ operator>>(Input &In, T &Val) {
return In;
}
+// Define non-member operator>> so that Input can stream in a polymorphic type.
+template <typename T>
+inline typename std::enable_if<has_PolymorphicTraits<T>::value, Input &>::type
+operator>>(Input &In, T &Val) {
+ EmptyContext Ctx;
+ if (In.setCurrentDocument())
+ yamlize(In, Val, true, Ctx);
+ return In;
+}
+
// Provide better error message about types missing a trait specialization
template <typename T>
inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
@@ -1597,6 +1811,24 @@ operator<<(Output &Out, T &Val) {
return Out;
}
+// Define non-member operator<< so that Output can stream out a polymorphic
+// type.
+template <typename T>
+inline typename std::enable_if<has_PolymorphicTraits<T>::value, Output &>::type
+operator<<(Output &Out, T &Val) {
+ EmptyContext Ctx;
+ Out.beginDocuments();
+ if (Out.preflightDocument(0)) {
+ // FIXME: The parser does not support explicit documents terminated with a
+ // plain scalar; the end-marker is included as part of the scalar token.
+ assert(PolymorphicTraits<T>::getKind(Val) != NodeKind::Scalar && "plain scalar documents are not supported");
+ yamlize(Out, Val, true, Ctx);
+ Out.postflightDocument();
+ }
+ Out.endDocuments();
+ return Out;
+}
+
// Provide better error message about types missing a trait specialization
template <typename T>
inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
index b9ea9b5817f2..d062e716209d 100644
--- a/contrib/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -367,12 +367,18 @@ class raw_fd_ostream : public raw_pwrite_stream {
int FD;
bool ShouldClose;
+ bool SupportsSeeking;
+
+#ifdef _WIN32
+ /// True if this fd refers to a Windows console device. Mintty and other
+ /// terminal emulators are TTYs, but they are not consoles.
+ bool IsWindowsConsole = false;
+#endif
+
std::error_code EC;
uint64_t pos;
- bool SupportsSeeking;
-
/// See raw_ostream::write_impl.
void write_impl(const char *Ptr, size_t Size) override;
@@ -548,6 +554,8 @@ class buffer_ostream : public raw_svector_ostream {
raw_ostream &OS;
SmallVector<char, 0> Buffer;
+ virtual void anchor() override;
+
public:
buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
~buffer_ostream() override { OS << str(); }
diff --git a/contrib/llvm/include/llvm/Support/type_traits.h b/contrib/llvm/include/llvm/Support/type_traits.h
index 55d84f138f07..e7b8f2517b8a 100644
--- a/contrib/llvm/include/llvm/Support/type_traits.h
+++ b/contrib/llvm/include/llvm/Support/type_traits.h
@@ -30,9 +30,10 @@ namespace llvm {
template <typename T>
struct isPodLike {
// std::is_trivially_copyable is available in libc++ with clang, libstdc++
- // that comes with GCC 5.
+ // that comes with GCC 5. MSVC 2015 and newer also have
+ // std::is_trivially_copyable.
#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
- (defined(__GNUC__) && __GNUC__ >= 5)
+ (defined(__GNUC__) && __GNUC__ >= 5) || defined(_MSC_VER)
// If the compiler supports the is_trivially_copyable trait use it, as it
// matches the definition of isPodLike closely.
static const bool value = std::is_trivially_copyable<T>::value;
diff --git a/contrib/llvm/include/llvm/TableGen/StringMatcher.h b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
index 09d2092d43b0..3aa3540d616d 100644
--- a/contrib/llvm/include/llvm/TableGen/StringMatcher.h
+++ b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
@@ -23,12 +23,11 @@ namespace llvm {
class raw_ostream;
-/// StringMatcher - Given a list of strings and code to execute when they match,
-/// output a simple switch tree to classify the input string.
+/// Given a list of strings and code to execute when they match, output a
+/// simple switch tree to classify the input string.
///
-/// If a match is found, the code in Vals[i].second is executed; control must
+/// If a match is found, the code in Matches[i].second is executed; control must
/// not exit this code fragment. If nothing matches, execution falls through.
-///
class StringMatcher {
public:
using StringPair = std::pair<std::string, std::string>;
diff --git a/contrib/llvm/include/llvm/Target/CodeGenCWrappers.h b/contrib/llvm/include/llvm/Target/CodeGenCWrappers.h
index e9a990569d36..3ad77c5d5e00 100644
--- a/contrib/llvm/include/llvm/Target/CodeGenCWrappers.h
+++ b/contrib/llvm/include/llvm/Target/CodeGenCWrappers.h
@@ -31,6 +31,8 @@ inline Optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
LLVM_FALLTHROUGH;
case LLVMCodeModelDefault:
return None;
+ case LLVMCodeModelTiny:
+ return CodeModel::Tiny;
case LLVMCodeModelSmall:
return CodeModel::Small;
case LLVMCodeModelKernel:
@@ -45,6 +47,8 @@ inline Optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
inline LLVMCodeModel wrap(CodeModel::Model Model) {
switch (Model) {
+ case CodeModel::Tiny:
+ return LLVMCodeModelTiny;
case CodeModel::Small:
return LLVMCodeModelSmall;
case CodeModel::Kernel:
diff --git a/contrib/llvm/include/llvm/Target/GenericOpcodes.td b/contrib/llvm/include/llvm/Target/GenericOpcodes.td
index 79cc1e4d9eee..045fe2520047 100644
--- a/contrib/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/contrib/llvm/include/llvm/Target/GenericOpcodes.td
@@ -120,6 +120,36 @@ def G_VAARG : GenericInstruction {
let mayStore = 1;
}
+def G_CTLZ : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
+def G_CTLZ_ZERO_UNDEF : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
+def G_CTTZ : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
+def G_CTTZ_ZERO_UNDEF : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
+def G_CTPOP : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
def G_BSWAP : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src);
@@ -281,6 +311,14 @@ def G_PTR_MASK : GenericInstruction {
// Overflow ops
//------------------------------------------------------------------------------
+// Generic unsigned addition producing a carry flag.
+def G_UADDO : GenericInstruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
// Generic unsigned addition consuming and producing a carry flag.
def G_UADDE : GenericInstruction {
let OutOperandList = (outs type0:$dst, type1:$carry_out);
@@ -296,6 +334,19 @@ def G_SADDO : GenericInstruction {
let isCommutable = 1;
}
+// Generic signed addition consuming and producing a carry flag.
+def G_SADDE : GenericInstruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+ let hasSideEffects = 0;
+}
+
+// Generic unsigned subtraction producing a carry flag.
+def G_USUBO : GenericInstruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
// Generic unsigned subtraction consuming and producing a carry flag.
def G_USUBE : GenericInstruction {
let OutOperandList = (outs type0:$dst, type1:$carry_out);
@@ -303,13 +354,20 @@ def G_USUBE : GenericInstruction {
let hasSideEffects = 0;
}
-// Generic unsigned subtraction producing a carry flag.
+// Generic signed subtraction producing a carry flag.
def G_SSUBO : GenericInstruction {
let OutOperandList = (outs type0:$dst, type1:$carry_out);
let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
+// Generic signed subtraction consuming and producing a carry flag.
+def G_SSUBE : GenericInstruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+ let hasSideEffects = 0;
+}
+
// Generic unsigned multiplication producing a carry flag.
def G_UMULO : GenericInstruction {
let OutOperandList = (outs type0:$dst, type1:$carry_out);
@@ -482,6 +540,35 @@ def G_FLOG2 : GenericInstruction {
let hasSideEffects = 0;
}
+// Floating point base-10 logarithm of a value.
+def G_FLOG10 : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = 0;
+}
+
+// Floating point ceiling of a value.
+def G_FCEIL : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Opcodes for LLVM Intrinsics
+//------------------------------------------------------------------------------
+def G_INTRINSIC_TRUNC : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = 0;
+}
+
+def G_INTRINSIC_ROUND : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1);
+ let hasSideEffects = 0;
+}
+
//------------------------------------------------------------------------------
// Memory ops
//------------------------------------------------------------------------------
@@ -576,6 +663,9 @@ def G_EXTRACT : GenericInstruction {
// Extract multiple registers specified size, starting from blocks given by
// indexes. This will almost certainly be mapped to sub-register COPYs after
// register banks have been selected.
+// The output operands are always ordered from lowest bits to highest:
+// %bits_0_7:(s8), %bits_8_15:(s8),
+// %bits_16_23:(s8), %bits_24_31:(s8) = G_UNMERGE_VALUES %0:(s32)
def G_UNMERGE_VALUES : GenericInstruction {
let OutOperandList = (outs type0:$dst0, variable_ops);
let InOperandList = (ins type1:$src);
@@ -589,13 +679,38 @@ def G_INSERT : GenericInstruction {
let hasSideEffects = 0;
}
-/// Concatenate multiple registers of the same size into a wider register.
+// Concatenate multiple registers of the same size into a wider register.
+// The input operands are always ordered from lowest bits to highest:
+// %0:(s32) = G_MERGE_VALUES %bits_0_7:(s8), %bits_8_15:(s8),
+// %bits_16_23:(s8), %bits_24_31:(s8)
def G_MERGE_VALUES : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type1:$src0, variable_ops);
let hasSideEffects = 0;
}
+/// Create a vector from multiple scalar registers.
+def G_BUILD_VECTOR : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src0, variable_ops);
+ let hasSideEffects = 0;
+}
+
+/// Like G_BUILD_VECTOR, but truncates the larger operand types to fit the
+/// destination vector elt type.
+def G_BUILD_VECTOR_TRUNC : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src0, variable_ops);
+ let hasSideEffects = 0;
+}
+
+/// Create a vector by concatenating vectors together.
+def G_CONCAT_VECTORS : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src0, variable_ops);
+ let hasSideEffects = 0;
+}
+
// Intrinsic without side effects.
def G_INTRINSIC : GenericInstruction {
let OutOperandList = (outs);
diff --git a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index d487759a4852..31d26361260d 100644
--- a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -83,6 +83,13 @@ def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_void>;
def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_w_chain>;
def : GINodeEquiv<G_BR, br>;
def : GINodeEquiv<G_BSWAP, bswap>;
+def : GINodeEquiv<G_CTLZ, ctlz>;
+def : GINodeEquiv<G_CTTZ, cttz>;
+def : GINodeEquiv<G_CTLZ_ZERO_UNDEF, ctlz_zero_undef>;
+def : GINodeEquiv<G_CTTZ_ZERO_UNDEF, cttz_zero_undef>;
+def : GINodeEquiv<G_CTPOP, ctpop>;
+def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
+def : GINodeEquiv<G_FCEIL, fceil>;
// Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
// complications that tablegen must take care of. For example, Predicates such
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index b746505d2a45..e4b827babb92 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -439,6 +439,7 @@ class Instruction {
// instruction.
bit isReturn = 0; // Is this instruction a return instruction?
bit isBranch = 0; // Is this instruction a branch instruction?
+ bit isEHScopeReturn = 0; // Does this instruction end an EH scope?
bit isIndirectBranch = 0; // Is this instruction an indirect branch?
bit isCompare = 0; // Is this instruction a comparison instruction?
bit isMoveImm = 0; // Is this instruction a move immediate instruction?
@@ -478,6 +479,7 @@ class Instruction {
bit isInsertSubreg = 0; // Is this instruction a kind of insert subreg?
// If so, make sure to override
// TargetInstrInfo::getInsertSubregLikeInputs.
+ bit variadicOpsAreDefs = 0; // Are variadic operands definitions?
// Does the instruction have side effects that are not captured by any
// operands of the instruction or other flags?
@@ -1103,7 +1105,7 @@ def FAULTING_OP : StandardPseudoInstruction {
let isBranch = 1;
}
def PATCHABLE_OP : StandardPseudoInstruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let usesCustomInserter = 1;
let mayLoad = 1;
@@ -1163,8 +1165,8 @@ def PATCHABLE_TYPED_EVENT_CALL : StandardPseudoInstruction {
let hasSideEffects = 1;
}
def FENTRY_CALL : StandardPseudoInstruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins variable_ops);
+ let OutOperandList = (outs);
+ let InOperandList = (ins);
let AsmString = "# FEntry call";
let usesCustomInserter = 1;
let mayLoad = 1;
@@ -1554,3 +1556,8 @@ include "llvm/Target/GlobalISel/Target.td"
// Pull in the common support for the Global ISel DAG-based selector generation.
//
include "llvm/Target/GlobalISel/SelectionDAGCompat.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for Pfm Counters generation.
+//
+include "llvm/Target/TargetPfmCounters.td"
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrPredicate.td b/contrib/llvm/include/llvm/Target/TargetInstrPredicate.td
index 8d57cae02d22..4b2c57b34c2e 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrPredicate.td
+++ b/contrib/llvm/include/llvm/Target/TargetInstrPredicate.td
@@ -7,29 +7,39 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines MCInstPredicate classes and its subclasses.
+// This file defines class MCInstPredicate and its subclasses.
//
-// MCInstPredicate is used to describe constraints on the opcode/operand(s) of
-// an instruction. Each MCInstPredicate class has a well-known semantic, and it
-// is used by a PredicateExpander to generate code for MachineInstr and/or
-// MCInst.
-//
-// MCInstPredicate definitions can be used to construct MCSchedPredicate
-// definitions. An MCSchedPredicate can be used in place of a SchedPredicate
-// when defining SchedReadVariant and SchedWriteVariant used by a processor
-// scheduling model.
+// MCInstPredicate definitions are used by target scheduling models to describe
+// constraints on instructions.
//
-// Here is an example of MCInstPredicate definition:
+// Here is an example of an MCInstPredicate definition in tablegen:
//
// def MCInstPredicateExample : CheckAll<[
// CheckOpcode<[BLR]>,
// CheckIsRegOperand<0>,
// CheckNot<CheckRegOperand<0, LR>>]>;
//
-// Predicate `MCInstPredicateExample` checks that the machine instruction in
-// input is a BLR, and that operand at index 0 is register `LR`.
+// The syntax for MCInstPredicate is declarative, and predicate definitions can
+// be composed together in order to generate more complex constraints.
+//
+// The `CheckAll` from the example defines a composition of three different
+// predicates. Definition `MCInstPredicateExample` identifies instructions
+// whose opcode is BLR, and whose first operand is a register different from
+// register `LR`.
+//
+// Every MCInstPredicate class has a well-known semantic in tablegen. For
+// example, `CheckOpcode` is a special type of predicate used to describe a
+// constraint on the value of an instruction opcode.
//
-// That predicate could be used to rewrite the following definition (from
+// MCInstPredicate definitions are typically used by scheduling models to
+// construct MCSchedPredicate definitions (see the definition of class
+// MCSchedPredicate in llvm/Target/TargetSchedule.td).
+// In particular, an MCSchedPredicate can be used instead of a SchedPredicate
+// when defining the set of SchedReadVariant and SchedWriteVariant of a
+// processor scheduling model.
+//
+// The `MCInstPredicateExample` definition above is equivalent (and therefore
+// could replace) the following definition from a previous ExynosM3 model (see
// AArch64SchedExynosM3.td):
//
// def M3BranchLinkFastPred : SchedPredicate<[{
@@ -37,22 +47,13 @@
// MI->getOperand(0).isReg() &&
// MI->getOperand(0).getReg() != AArch64::LR}]>;
//
-// MCInstPredicate definitions are used to construct MCSchedPredicate (see the
-// definition of class MCSchedPredicate in llvm/Target/TargetSchedule.td). An
-// MCSchedPredicate can be used by a `SchedVar` to associate a predicate with a
-// list of SchedReadWrites. Note that `SchedVar` are used to create SchedVariant
-// definitions.
-//
-// Each MCInstPredicate class has a well known semantic. For example,
-// `CheckOpcode` is only used to check the instruction opcode value.
-//
-// MCInstPredicate classes allow the definition of predicates in a declarative
-// way. These predicates don't require a custom block of C++, and can be used
-// to define conditions on instructions without being bound to a particular
+// The main advantage of using MCInstPredicate instead of SchedPredicate is
+// portability: users don't need to specify predicates in C++. As a consequence
+// of this, MCInstPredicate definitions are not bound to a particular
// representation (i.e. MachineInstr vs MCInst).
//
-// It also means that tablegen backends must know how to parse and expand them
-// into code that works on MCInst (or MachineInst).
+// Tablegen backends know how to expand MCInstPredicate definitions into actual
+// C++ code that works on MachineInstr (and/or MCInst).
//
// Instances of class PredicateExpander (see utils/Tablegen/PredicateExpander.h)
// know how to expand a predicate. For each MCInstPredicate class, there must be
@@ -68,6 +69,7 @@
// Forward declarations.
class Instruction;
+class SchedMachineModel;
// A generic machine instruction predicate.
class MCInstPredicate;
@@ -104,28 +106,50 @@ class CheckSameRegOperand<int First, int Second> : MCInstPredicate {
int SecondIndex = Second;
}
+// Base class for checks on register/immediate operands.
+// It allows users to define checks like:
+// MyFunction(MI->getOperand(Index).getImm()) == Val;
+//
+// In the example above, `MyFunction` is a function that takes as input an
+// immediate operand value, and returns another value. Field `FunctionMapper` is
+// the name of the function to call on the operand value.
+class CheckOperandBase<int Index, string Fn = ""> : MCOperandPredicate<Index> {
+ string FunctionMapper = Fn;
+}
+
// Check that the machine register operand at position `Index` references
// register R. This predicate assumes that we already checked that the machine
// operand at position `Index` is a register operand.
-class CheckRegOperand<int Index, Register R> : MCOperandPredicate<Index> {
+class CheckRegOperand<int Index, Register R> : CheckOperandBase<Index> {
Register Reg = R;
}
// Check if register operand at index `Index` is the invalid register.
-class CheckInvalidRegOperand<int Index> : MCOperandPredicate<Index>;
+class CheckInvalidRegOperand<int Index> : CheckOperandBase<Index>;
// Check that the operand at position `Index` is immediate `Imm`.
-class CheckImmOperand<int Index, int Imm> : MCOperandPredicate<Index> {
+// If field `FunctionMapper` is a non-empty string, then function
+// `FunctionMapper` is applied to the operand value, and the return value is then
+// compared against `Imm`.
+class CheckImmOperand<int Index, int Imm> : CheckOperandBase<Index> {
int ImmVal = Imm;
}
// Similar to CheckImmOperand, however the immediate is not a literal number.
// This is useful when we want to compare the value of an operand against an
// enum value, and we know the actual integer value of that enum.
-class CheckImmOperand_s<int Index, string Value> : MCOperandPredicate<Index> {
+class CheckImmOperand_s<int Index, string Value> : CheckOperandBase<Index> {
string ImmVal = Value;
}
+// Expands to a call to `FunctionMapper` if field `FunctionMapper` is set.
+// Otherwise, it expands to a CheckNot<CheckInvalidRegOperand<Index>>.
+class CheckRegOperandSimple<int Index> : CheckOperandBase<Index>;
+
+// Expands to a call to `FunctionMapper` if field `FunctionMapper` is set.
+// Otherwise, it simply evaluates to TruePred.
+class CheckImmOperandSimple<int Index> : CheckOperandBase<Index>;
+
// Check that the operand at position `Index` is immediate value zero.
class CheckZeroOperand<int Index> : CheckImmOperand<Index, 0>;
@@ -169,18 +193,53 @@ class CheckAll<list<MCInstPredicate> Sequence>
class CheckAny<list<MCInstPredicate> Sequence>
: CheckPredicateSequence<Sequence>;
-// Check that a call to method `Name` in class "XXXGenInstrInfo" (where XXX is
-// the `Target` name) returns true.
+
+// Used to expand the body of a function predicate. See the definition of
+// TIIPredicate below.
+class MCStatement;
+
+// Expands to a return statement. The return expression is a boolean expression
+// described by a MCInstPredicate.
+class MCReturnStatement<MCInstPredicate predicate> : MCStatement {
+ MCInstPredicate Pred = predicate;
+}
+
+// Used to automatically construct cases of a switch statement where the switch
+// variable is an instruction opcode. There is a 'case' for every opcode in the
+// `opcodes` list, and each case is associated with MCStatement `caseStmt`.
+class MCOpcodeSwitchCase<list<Instruction> opcodes, MCStatement caseStmt> {
+ list<Instruction> Opcodes = opcodes;
+ MCStatement CaseStmt = caseStmt;
+}
+
+// Expands to a switch statement. The switch variable is an instruction opcode.
+// The auto-generated switch is populated by a number of cases based on the
+// `cases` list in input. A default case is automatically generated, and it
+// evaluates to `default`.
+class MCOpcodeSwitchStatement<list<MCOpcodeSwitchCase> cases,
+ MCStatement default> : MCStatement {
+ list<MCOpcodeSwitchCase> Cases = cases;
+ MCStatement DefaultCase = default;
+}
+
+// Base class for function predicates.
+class FunctionPredicateBase<string name, MCStatement body> {
+ string FunctionName = name;
+ MCStatement Body = body;
+}
+
+// Check that a call to method `Name` in class "XXXInstrInfo" (where XXX is
+// the name of a target) returns true.
//
// TIIPredicate definitions are used to model calls to the target-specific
// InstrInfo. A TIIPredicate is treated specially by the InstrInfoEmitter
// tablegen backend, which will use it to automatically generate a definition in
-// the target specific `GenInstrInfo` class.
-class TIIPredicate<string Target, string Name, MCInstPredicate P> : MCInstPredicate {
- string TargetName = Target;
- string FunctionName = Name;
- MCInstPredicate Pred = P;
-}
+// the target specific `InstrInfo` class.
+//
+// There cannot be multiple TIIPredicate definitions with the same name for the
+// same target.
+class TIIPredicate<string Name, MCStatement body>
+ : FunctionPredicateBase<Name, body>, MCInstPredicate;
// A function predicate that takes as input a machine instruction, and returns
// a boolean value.
@@ -195,3 +254,106 @@ class CheckFunctionPredicate<string MCInstFn, string MachineInstrFn> : MCInstPre
string MCInstFnName = MCInstFn;
string MachineInstrFnName = MachineInstrFn;
}
+
+// Used to classify machine instructions based on a machine instruction
+// predicate.
+//
+// Let IC be an InstructionEquivalenceClass definition, and MI a machine
+// instruction. We say that MI belongs to the equivalence class described by IC
+// if and only if the following two conditions are met:
+// a) MI's opcode is in the `opcodes` set, and
+// b) `Predicate` evaluates to true when applied to MI.
+//
+// Instances of this class can be used by processor scheduling models to
+// describe instructions that have a property in common. For example,
+// InstructionEquivalenceClass definitions can be used to identify the set of
+// dependency breaking instructions for a processor model.
+//
+// An (optional) list of operand indices can be used to further describe
+// properties that apply to instruction operands. For example, it can be used to
+// identify register uses of a dependency breaking instructions that are not in
+// a RAW dependency.
+class InstructionEquivalenceClass<list<Instruction> opcodes,
+ MCInstPredicate pred,
+ list<int> operands = []> {
+ list<Instruction> Opcodes = opcodes;
+ MCInstPredicate Predicate = pred;
+ list<int> OperandIndices = operands;
+}
+
+// Used by processor models to describe dependency breaking instructions.
+//
+// This is mainly an alias for InstructionEquivalenceClass. Input operand
+// `BrokenDeps` identifies the set of "broken dependencies". There is one bit
+// per each implicit and explicit input operand. An empty set of broken
+// dependencies means: "explicit input register operands are independent."
+class DepBreakingClass<list<Instruction> opcodes, MCInstPredicate pred,
+ list<int> BrokenDeps = []>
+ : InstructionEquivalenceClass<opcodes, pred, BrokenDeps>;
+
+// A function descriptor used to describe the signature of a predicate methods
+// which will be expanded by the STIPredicateExpander into a tablegen'd
+// XXXGenSubtargetInfo class member definition (here, XXX is a target name).
+//
+// It describes the signature of a TargetSubtarget hook, as well as a few extra
+// properties. Examples of extra properties are:
+// - The default return value for the auto-generate function hook.
+// - A list of subtarget hooks (Delegates) that are called from this function.
+//
+class STIPredicateDecl<string name, MCInstPredicate default = FalsePred,
+ bit overrides = 1, bit expandForMC = 1,
+ bit updatesOpcodeMask = 0,
+ list<STIPredicateDecl> delegates = []> {
+ string Name = name;
+
+ MCInstPredicate DefaultReturnValue = default;
+
+ // True if this method is declared as virtual in class TargetSubtargetInfo.
+ bit OverridesBaseClassMember = overrides;
+
+ // True if we need an equivalent predicate function in the MC layer.
+ bit ExpandForMC = expandForMC;
+
+ // True if the autogenerated method has a extra in/out APInt param used as a
+ // mask of operands.
+ bit UpdatesOpcodeMask = updatesOpcodeMask;
+
+ // A list of STIPredicates used by this definition to delegate part of the
+ // computation. For example, STIPredicateFunction `isDependencyBreaking()`
+ // delegates to `isZeroIdiom()` part of its computation.
+ list<STIPredicateDecl> Delegates = delegates;
+}
+
+// A predicate function definition member of class `XXXGenSubtargetInfo`.
+//
+// If `Declaration.ExpandForMC` is true, then SubtargetEmitter
+// will also expand another definition of this method that accepts a MCInst.
+class STIPredicate<STIPredicateDecl declaration,
+ list<InstructionEquivalenceClass> classes> {
+ STIPredicateDecl Declaration = declaration;
+ list<InstructionEquivalenceClass> Classes = classes;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Convenience classes and definitions used by processor scheduling models to
+// describe dependency breaking instructions and move elimination candidates.
+let UpdatesOpcodeMask = 1 in {
+
+def IsZeroIdiomDecl : STIPredicateDecl<"isZeroIdiom">;
+
+let Delegates = [IsZeroIdiomDecl] in
+def IsDepBreakingDecl : STIPredicateDecl<"isDependencyBreaking">;
+
+} // UpdatesOpcodeMask
+
+def IsOptimizableRegisterMoveDecl
+ : STIPredicateDecl<"isOptimizableRegisterMove">;
+
+class IsZeroIdiomFunction<list<DepBreakingClass> classes>
+ : STIPredicate<IsZeroIdiomDecl, classes>;
+
+class IsDepBreakingFunction<list<DepBreakingClass> classes>
+ : STIPredicate<IsDepBreakingDecl, classes>;
+
+class IsOptimizableRegisterMove<list<InstructionEquivalenceClass> classes>
+ : STIPredicate<IsOptimizableRegisterMoveDecl, classes>;
diff --git a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index dbdfd4139a0f..e80f2bf82f26 100644
--- a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -45,6 +45,13 @@ class TargetLoweringObjectFile : public MCObjectFileInfo {
protected:
bool SupportIndirectSymViaGOTPCRel = false;
bool SupportGOTPCRelWithOffset = true;
+ bool SupportDebugThreadLocalLocation = true;
+
+ /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
+ /// for EH.
+ unsigned PersonalityEncoding = 0;
+ unsigned LSDAEncoding = 0;
+ unsigned TTypeEncoding = 0;
/// This section contains the static constructor pointer list.
MCSection *StaticCtorSection = nullptr;
@@ -135,6 +142,10 @@ public:
const TargetMachine &TM,
MachineModuleInfo *MMI) const;
+ unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
+ unsigned getLSDAEncoding() const { return LSDAEncoding; }
+ unsigned getTTypeEncoding() const { return TTypeEncoding; }
+
const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
MCStreamer &Streamer) const;
@@ -170,6 +181,11 @@ public:
return SupportGOTPCRelWithOffset;
}
+ /// Target supports TLS offset relocation in debug section?
+ bool supportDebugThreadLocalLocation() const {
+ return SupportDebugThreadLocalLocation;
+ }
+
/// Get the target specific PC relative GOT entry relocation
virtual const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
const MCValue &MV,
@@ -185,6 +201,12 @@ public:
virtual void emitLinkerFlagsForUsed(raw_ostream &OS,
const GlobalValue *GV) const {}
+ /// If supported, return the section to use for the llvm.commandline
+ /// metadata. Otherwise, return nullptr.
+ virtual MCSection *getSectionForCommandLines() const {
+ return nullptr;
+ }
+
protected:
virtual MCSection *SelectSectionForGlobal(const GlobalObject *GO,
SectionKind Kind,
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
index 1ca68c8df63a..3eafcc25583a 100644
--- a/contrib/llvm/include/llvm/Target/TargetMachine.h
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -84,11 +84,10 @@ protected: // Can only create subclasses.
CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
/// Contains target specific asm information.
- const MCAsmInfo *AsmInfo;
-
- const MCRegisterInfo *MRI;
- const MCInstrInfo *MII;
- const MCSubtargetInfo *STI;
+ std::unique_ptr<const MCAsmInfo> AsmInfo;
+ std::unique_ptr<const MCRegisterInfo> MRI;
+ std::unique_ptr<const MCInstrInfo> MII;
+ std::unique_ptr<const MCSubtargetInfo> STI;
unsigned RequireStructuredCFG : 1;
unsigned O0WantsFastISel : 1;
@@ -160,11 +159,11 @@ public:
void resetTargetOptions(const Function &F) const;
/// Return target specific asm information.
- const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
+ const MCAsmInfo *getMCAsmInfo() const { return AsmInfo.get(); }
- const MCRegisterInfo *getMCRegisterInfo() const { return MRI; }
- const MCInstrInfo *getMCInstrInfo() const { return MII; }
- const MCSubtargetInfo *getMCSubtargetInfo() const { return STI; }
+ const MCRegisterInfo *getMCRegisterInfo() const { return MRI.get(); }
+ const MCInstrInfo *getMCInstrInfo() const { return MII.get(); }
+ const MCSubtargetInfo *getMCSubtargetInfo() const { return STI.get(); }
/// If intrinsic information is available, return it. If not, return null.
virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
@@ -202,6 +201,9 @@ public:
bool getO0WantsFastISel() { return O0WantsFastISel; }
void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
void setGlobalISel(bool Enable) { Options.EnableGlobalISel = Enable; }
+ void setGlobalISelAbort(GlobalISelAbortMode Mode) {
+ Options.GlobalISelAbort = Mode;
+ }
void setMachineOutliner(bool Enable) {
Options.EnableMachineOutliner = Enable;
}
@@ -285,18 +287,6 @@ public:
void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
MCSymbol *getSymbol(const GlobalValue *GV) const;
-
- /// True if the target uses physical regs at Prolog/Epilog insertion
- /// time. If true (most machines), all vregs must be allocated before
- /// PEI. If false (virtual-register machines), then callee-save register
- /// spilling and scavenging are not needed or used.
- virtual bool usesPhysRegsForPEI() const { return true; }
-
- /// True if the target wants to use interprocedural register allocation by
- /// default. The -enable-ipra flag can be used to override this.
- virtual bool useIPRA() const {
- return false;
- }
};
/// This class describes a target machine that is implemented with the LLVM
@@ -350,8 +340,37 @@ public:
bool addAsmPrinter(PassManagerBase &PM, raw_pwrite_stream &Out,
raw_pwrite_stream *DwoOut, CodeGenFileType FileTYpe,
MCContext &Context);
+
+ /// True if the target uses physical regs at Prolog/Epilog insertion
+ /// time. If true (most machines), all vregs must be allocated before
+ /// PEI. If false (virtual-register machines), then callee-save register
+ /// spilling and scavenging are not needed or used.
+ virtual bool usesPhysRegsForPEI() const { return true; }
+
+ /// True if the target wants to use interprocedural register allocation by
+ /// default. The -enable-ipra flag can be used to override this.
+ virtual bool useIPRA() const {
+ return false;
+ }
};
+/// Helper method for getting the code model, returning Default if
+/// CM does not have a value. The tiny and kernel models will produce
+/// an error, so targets that support them or require more complex codemodel
+/// selection logic should implement and call their own getEffectiveCodeModel.
+inline CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
+ CodeModel::Model Default) {
+ if (CM) {
+ // By default, targets do not support the tiny and kernel models.
+ if (*CM == CodeModel::Tiny)
+ report_fatal_error("Target does not support the tiny CodeModel");
+ if (*CM == CodeModel::Kernel)
+ report_fatal_error("Target does not support the kernel CodeModel");
+ return *CM;
+ }
+ return Default;
+}
+
} // end namespace llvm
#endif // LLVM_TARGET_TARGETMACHINE_H
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
index 07ed773de55e..b18101d92833 100644
--- a/contrib/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -96,6 +96,14 @@ namespace llvm {
SCE // Tune debug info for SCE targets (e.g. PS4).
};
+ /// Enable abort calls when global instruction selection fails to lower/select
+ /// an instruction.
+ enum class GlobalISelAbortMode {
+ Disable, // Disable the abort.
+ Enable, // Enable the abort.
+ DisableWithDiag // Disable the abort but emit a diagnostic on failure.
+ };
+
class TargetOptions {
public:
TargetOptions()
@@ -192,6 +200,10 @@ namespace llvm {
/// EnableGlobalISel - This flag enables global instruction selection.
unsigned EnableGlobalISel : 1;
+ /// EnableGlobalISelAbort - Control abort behaviour when global instruction
+ /// selection fails to lower/select an instruction.
+ GlobalISelAbortMode GlobalISelAbort = GlobalISelAbortMode::Enable;
+
/// UseInitArray - Use .init_array instead of .ctors for static
/// constructors.
unsigned UseInitArray : 1;
diff --git a/contrib/llvm/include/llvm/Target/TargetPfmCounters.td b/contrib/llvm/include/llvm/Target/TargetPfmCounters.td
new file mode 100644
index 000000000000..dac150f03445
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetPfmCounters.td
@@ -0,0 +1,50 @@
+//===- TargetPfmCounters.td - Target Pfm Counters -*- tablegen ----------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces for performance counters.
+
+// Definition of a hardware counters from libpfm identifiers.
+class PfmCounter<string counter> {
+ // The name of the counter that measures events.
+ // The name can be "some_counter + some_other_counter", in which case the
+ // measured value is the sum of events on these counters.
+ string Counter = counter;
+}
+
+// Issue counters can be tied to a ProcResource
+class PfmIssueCounter<string resource_name, string counter>
+ : PfmCounter<counter> {
+ // The name of the ProcResource on which uops are issued. This is used by
+ // llvm-exegesis to compare measurements with values in the SchedModels.
+ // If the CPU has a sched model, this should correspond to the name of a
+ // ProcResource.
+ string ResourceName = resource_name;
+}
+
+def NoPfmCounter : PfmCounter <""> {}
+
+// Set of PfmCounters for measuring sched model characteristics.
+class ProcPfmCounters {
+ // Processors can define how to measure cycles by defining a CycleCounter.
+ PfmCounter CycleCounter = NoPfmCounter;
+ // Processors can define how to measure uops by defining a UopsCounter.
+ PfmCounter UopsCounter = NoPfmCounter;
+ // Processors can define how to measure issued uops by defining IssueCounters.
+ list<PfmIssueCounter> IssueCounters = [];
+}
+
+// A binding of a set of counters to a CPU.
+class PfmCountersBinding<string cpu_name, ProcPfmCounters counters> {
+ string CpuName = cpu_name;
+ ProcPfmCounters Counters = counters;
+}
+
+// Declares the default binding for unbound CPUs for the target.
+class PfmCountersDefaultBinding<ProcPfmCounters counters>
+ : PfmCountersBinding<"", counters> {}
diff --git a/contrib/llvm/include/llvm/Target/TargetSchedule.td b/contrib/llvm/include/llvm/Target/TargetSchedule.td
index 6fd2d5b78e54..808e183f5a5f 100644
--- a/contrib/llvm/include/llvm/Target/TargetSchedule.td
+++ b/contrib/llvm/include/llvm/Target/TargetSchedule.td
@@ -182,8 +182,7 @@ class ProcResourceKind;
//
// SchedModel ties these units to a processor for any stand-alone defs
// of this class.
-class ProcResourceUnits<ProcResourceKind kind, int num,
- list<string> pfmCounters> {
+class ProcResourceUnits<ProcResourceKind kind, int num> {
ProcResourceKind Kind = kind;
int NumUnits = num;
ProcResourceKind Super = ?;
@@ -198,8 +197,8 @@ def EponymousProcResourceKind : ProcResourceKind;
// Subtargets typically define processor resource kind and number of
// units in one place.
-class ProcResource<int num, list<string> pfmCounters = []> : ProcResourceKind,
- ProcResourceUnits<EponymousProcResourceKind, num, pfmCounters>;
+class ProcResource<int num> : ProcResourceKind,
+ ProcResourceUnits<EponymousProcResourceKind, num>;
class ProcResGroup<list<ProcResource> resources> : ProcResourceKind {
list<ProcResource> Resources = resources;
@@ -374,7 +373,11 @@ class SchedPredicate<code pred> : SchedPredicateBase {
SchedMachineModel SchedModel = ?;
code Predicate = pred;
}
-def NoSchedPred : SchedPredicate<[{true}]>;
+
+// Define a predicate to be typically used as the default case in a
+// SchedVariant. It the SchedVariant does not use any other predicate based on
+// MCSchedPredicate, this is the default scheduling case used by llvm-mca.
+def NoSchedPred : MCSchedPredicate<TruePred>;
// Associate a predicate with a list of SchedReadWrites. By default,
// the selected SchedReadWrites are still associated with a single
@@ -461,6 +464,10 @@ class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
// - The number of physical registers which can be used for register renaming
// purpose.
// - The cost of a register rename.
+// - The set of registers that allow move elimination.
+// - The maximum number of moves that can be eliminated every cycle.
+// - Whether move elimination is limited to register moves whose input
+// is known to be zero.
//
// The cost of a rename is the number of physical registers allocated by the
// register alias table to map the new definition. By default, register can be
@@ -507,11 +514,35 @@ class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
// partial write is combined with the previous super-register definition. We
// should add support for these cases, and correctly model merge problems with
// partial register accesses.
+//
+// Field MaxMovesEliminatedPerCycle specifies how many moves can be eliminated
+// every cycle. A default value of zero for that field means: there is no limit
+// to the number of moves that can be eliminated by this register file.
+//
+// An instruction MI is a candidate for move elimination if a call to
+// method TargetSubtargetInfo::isOptimizableRegisterMove(MI) returns true (see
+// llvm/CodeGen/TargetSubtargetInfo.h, and llvm/MC/MCInstrAnalysis.h).
+//
+// Subtargets can instantiate tablegen class IsOptimizableRegisterMove (see
+// llvm/Target/TargetInstrPredicate.td) to customize the set of move elimination
+// candidates. By default, no instruction is a valid move elimination candidate.
+//
+// A register move MI is eliminated only if:
+// - MI is a move elimination candidate.
+// - The destination register is from a register class that allows move
+// elimination (see field `AllowMoveElimination` below).
+// - Constraints on the move kind, and the maximum number of moves that can be
+// eliminated per cycle are all met.
+
class RegisterFile<int numPhysRegs, list<RegisterClass> Classes = [],
- list<int> Costs = []> {
+ list<int> Costs = [], list<bit> AllowMoveElim = [],
+ int MaxMoveElimPerCy = 0, bit AllowZeroMoveElimOnly = 0> {
list<RegisterClass> RegClasses = Classes;
list<int> RegCosts = Costs;
+ list<bit> AllowMoveElimination = AllowMoveElim;
int NumPhysRegs = numPhysRegs;
+ int MaxMovesEliminatedPerCycle = MaxMoveElimPerCy;
+ bit AllowZeroMoveEliminationOnly = AllowZeroMoveElimOnly;
SchedMachineModel SchedModel = ?;
}
@@ -531,23 +562,12 @@ class RetireControlUnit<int bufferSize, int retirePerCycle> {
SchedMachineModel SchedModel = ?;
}
-// Allow the definition of hardware counters.
-class PfmCounter {
+// Base class for Load/StoreQueue. It is used to identify processor resources
+// which describe load/store queues in the LS unit.
+class MemoryQueue<ProcResource PR> {
+ ProcResource QueueDescriptor = PR;
SchedMachineModel SchedModel = ?;
}
-// Each processor can define how to measure cycles by defining a
-// PfmCycleCounter.
-class PfmCycleCounter<string counter> : PfmCounter {
- string Counter = counter;
-}
-
-// Each ProcResourceUnits can define how to measure issued uops by defining
-// a PfmIssueCounter.
-class PfmIssueCounter<ProcResourceUnits resource, list<string> counters>
- : PfmCounter{
- // The resource units on which uops are issued.
- ProcResourceUnits Resource = resource;
- // The list of counters that measure issue events.
- list<string> Counters = counters;
-}
+class LoadQueue<ProcResource LDQueue> : MemoryQueue<LDQueue>;
+class StoreQueue<ProcResource STQueue> : MemoryQueue<STQueue>;
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
index 4ba4d821225d..eb5a14bd21b8 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -116,12 +116,18 @@ def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc.
def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl
SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>
]>;
+def SDTIntShiftDOp: SDTypeProfile<1, 3, [ // fshl, fshr
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
+]>;
def SDTIntSatNoShOp : SDTypeProfile<1, 2, [ // ssat with no shift
SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;
def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0>
]>;
+def SDTIntScaledBinOp : SDTypeProfile<1, 3, [ // smulfix
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
+]>;
def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc.
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>
@@ -162,7 +168,7 @@ def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
]>;
def SDTExtInvec : SDTypeProfile<1, 1, [ // sext_invec
SDTCisInt<0>, SDTCisVec<0>, SDTCisInt<1>, SDTCisVec<1>,
- SDTCisOpSmallerThanOp<1, 0>, SDTCisSameSizeAs<0,1>
+ SDTCisOpSmallerThanOp<1, 0>
]>;
def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
@@ -217,7 +223,7 @@ def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
]>;
def SDTMaskedStore: SDTypeProfile<0, 3, [ // masked store
- SDTCisPtrTy<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<1, 2>
+ SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>
]>;
def SDTMaskedLoad: SDTypeProfile<1, 3, [ // masked load
@@ -225,16 +231,6 @@ def SDTMaskedLoad: SDTypeProfile<1, 3, [ // masked load
SDTCisSameNumEltsAs<0, 2>
]>;
-def SDTMaskedGather: SDTypeProfile<2, 3, [ // masked gather
- SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<1, 3>,
- SDTCisPtrTy<4>, SDTCVecEltisVT<1, i1>, SDTCisSameNumEltsAs<0, 1>
-]>;
-
-def SDTMaskedScatter: SDTypeProfile<1, 3, [ // masked scatter
- SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameNumEltsAs<0, 1>,
- SDTCVecEltisVT<0, i1>, SDTCisPtrTy<3>
-]>;
-
def SDTVecShuffle : SDTypeProfile<1, 2, [
SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
]>;
@@ -360,6 +356,8 @@ def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>;
def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>;
def rotl : SDNode<"ISD::ROTL" , SDTIntShiftOp>;
def rotr : SDNode<"ISD::ROTR" , SDTIntShiftOp>;
+def fshl : SDNode<"ISD::FSHL" , SDTIntShiftDOp>;
+def fshr : SDNode<"ISD::FSHR" , SDTIntShiftDOp>;
def and : SDNode<"ISD::AND" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def or : SDNode<"ISD::OR" , SDTIntBinOp,
@@ -383,6 +381,12 @@ def umin : SDNode<"ISD::UMIN" , SDTIntBinOp,
def umax : SDNode<"ISD::UMAX" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
+def saddsat : SDNode<"ISD::SADDSAT" , SDTIntBinOp, [SDNPCommutative]>;
+def uaddsat : SDNode<"ISD::UADDSAT" , SDTIntBinOp, [SDNPCommutative]>;
+def ssubsat : SDNode<"ISD::SSUBSAT" , SDTIntBinOp>;
+def usubsat : SDNode<"ISD::USUBSAT" , SDTIntBinOp>;
+def smulfix : SDNode<"ISD::SMULFIX" , SDTIntScaledBinOp, [SDNPCommutative]>;
+
def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
def sext_invec : SDNode<"ISD::SIGN_EXTEND_VECTOR_INREG", SDTExtInvec>;
def zext_invec : SDNode<"ISD::ZERO_EXTEND_VECTOR_INREG", SDTExtInvec>;
@@ -416,8 +420,14 @@ def fminnum : SDNode<"ISD::FMINNUM" , SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
def fmaxnum : SDNode<"ISD::FMAXNUM" , SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
-def fminnan : SDNode<"ISD::FMINNAN" , SDTFPBinOp>;
-def fmaxnan : SDNode<"ISD::FMAXNAN" , SDTFPBinOp>;
+def fminnum_ieee : SDNode<"ISD::FMINNUM_IEEE", SDTFPBinOp,
+ [SDNPCommutative]>;
+def fmaxnum_ieee : SDNode<"ISD::FMAXNUM_IEEE", SDTFPBinOp,
+ [SDNPCommutative]>;
+def fminimum : SDNode<"ISD::FMINIMUM" , SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def fmaximum : SDNode<"ISD::FMAXIMUM" , SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
def fgetsign : SDNode<"ISD::FGETSIGN" , SDTFPToIntOp>;
def fcanonicalize : SDNode<"ISD::FCANONICALIZE", SDTFPUnaryOp>;
def fneg : SDNode<"ISD::FNEG" , SDTFPUnaryOp>;
@@ -510,10 +520,6 @@ def masked_store : SDNode<"ISD::MSTORE", SDTMaskedStore,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def masked_load : SDNode<"ISD::MLOAD", SDTMaskedLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-def masked_scatter : SDNode<"ISD::MSCATTER", SDTMaskedScatter,
- [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-def masked_gather : SDNode<"ISD::MGATHER", SDTMaskedGather,
- [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
@@ -630,6 +636,15 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
code ImmediateCode = [{}];
SDNodeXForm OperandTransform = xform;
+ // When this is set, the PredicateCode may refer to a constant Operands
+ // vector which contains the captured nodes of the DAG, in the order listed
+ // by the Operands field above.
+ //
+ // This is useful when Fragments involves associative / commutative
+ // operators: a single piece of code can easily refer to all operands even
+ // when re-associated / commuted variants of the fragment are matched.
+ bit PredicateCodeUsesOperands = 0;
+
// Define a few pre-packaged predicates. This helps GlobalISel import
// existing rules from SelectionDAG for many common cases.
// They will be tested prior to the code in pred and must not be used in
@@ -1067,6 +1082,15 @@ def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
let MemoryVT = f32;
}
+def nonvolatile_load : PatFrag<(ops node:$ptr),
+ (load node:$ptr), [{
+ return !cast<LoadSDNode>(N)->isVolatile();
+}]>;
+def nonvolatile_store : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return !cast<StoreSDNode>(N)->isVolatile();
+}]>;
+
// nontemporal store fragments.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
diff --git a/contrib/llvm/include/llvm/Testing/Support/SupportHelpers.h b/contrib/llvm/include/llvm/Testing/Support/SupportHelpers.h
index 96264ac81dc4..b2975ec395d5 100644
--- a/contrib/llvm/include/llvm/Testing/Support/SupportHelpers.h
+++ b/contrib/llvm/include/llvm/Testing/Support/SupportHelpers.h
@@ -10,10 +10,13 @@
#ifndef LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
#define LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
-#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_os_ostream.h"
#include "gtest/gtest-printers.h"
+#include <string>
+
namespace llvm {
namespace detail {
struct ErrorHolder {
@@ -52,6 +55,10 @@ void PrintTo(const ExpectedHolder<T> &Item, std::ostream *Out) {
}
}
} // namespace detail
+
+namespace unittest {
+SmallString<128> getInputFileDirectory(const char *Argv0);
+}
} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/TextAPI/ELF/ELFStub.h b/contrib/llvm/include/llvm/TextAPI/ELF/ELFStub.h
new file mode 100644
index 000000000000..fa54e6f8b711
--- /dev/null
+++ b/contrib/llvm/include/llvm/TextAPI/ELF/ELFStub.h
@@ -0,0 +1,69 @@
+//===- ELFStub.h ------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===-----------------------------------------------------------------------===/
+///
+/// \file
+/// This file defines an internal representation of an ELF stub.
+///
+//===-----------------------------------------------------------------------===/
+
+#ifndef LLVM_TEXTAPI_ELF_ELFSTUB_H
+#define LLVM_TEXTAPI_ELF_ELFSTUB_H
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Support/VersionTuple.h"
+#include <vector>
+#include <set>
+
+namespace llvm {
+namespace elfabi {
+
+typedef uint16_t ELFArch;
+
+enum class ELFSymbolType {
+ NoType = ELF::STT_NOTYPE,
+ Object = ELF::STT_OBJECT,
+ Func = ELF::STT_FUNC,
+ TLS = ELF::STT_TLS,
+
+ // Type information is 4 bits, so 16 is safely out of range.
+ Unknown = 16,
+};
+
+struct ELFSymbol {
+ ELFSymbol(std::string SymbolName) : Name(SymbolName) {}
+ std::string Name;
+ uint64_t Size;
+ ELFSymbolType Type;
+ bool Undefined;
+ bool Weak;
+ Optional<std::string> Warning;
+ bool operator<(const ELFSymbol &RHS) const {
+ return Name < RHS.Name;
+ }
+};
+
+// A cumulative representation of ELF stubs.
+// Both textual and binary stubs will read into and write from this object.
+class ELFStub {
+// TODO: Add support for symbol versioning.
+public:
+ VersionTuple TbeVersion;
+ Optional<std::string> SoName;
+ ELFArch Arch;
+ std::vector<std::string> NeededLibs;
+ std::set<ELFSymbol> Symbols;
+
+ ELFStub() {}
+ ELFStub(const ELFStub &Stub);
+ ELFStub(ELFStub &&Stub);
+};
+} // end namespace elfabi
+} // end namespace llvm
+
+#endif // LLVM_TEXTAPI_ELF_ELFSTUB_H
diff --git a/contrib/llvm/include/llvm/TextAPI/ELF/TBEHandler.h b/contrib/llvm/include/llvm/TextAPI/ELF/TBEHandler.h
new file mode 100644
index 000000000000..91521c656fa2
--- /dev/null
+++ b/contrib/llvm/include/llvm/TextAPI/ELF/TBEHandler.h
@@ -0,0 +1,45 @@
+//===- TBEHandler.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===-----------------------------------------------------------------------===/
+///
+/// \file
+/// This file declares an interface for reading and writing .tbe (text-based
+/// ELF) files.
+///
+//===-----------------------------------------------------------------------===/
+
+#ifndef LLVM_TEXTAPI_ELF_TBEHANDLER_H
+#define LLVM_TEXTAPI_ELF_TBEHANDLER_H
+
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+
+namespace llvm {
+
+class raw_ostream;
+class Error;
+class StringRef;
+class VersionTuple;
+
+namespace elfabi {
+
+class ELFStub;
+
+const VersionTuple TBEVersionCurrent(1, 0);
+
+/// Attempts to read an ELF interface file from a StringRef buffer.
+Expected<std::unique_ptr<ELFStub>> readTBEFromBuffer(StringRef Buf);
+
+/// Attempts to write an ELF interface file to a raw_ostream.
+Error writeTBEToOutputStream(raw_ostream &OS, const ELFStub &Stub);
+
+} // end namespace elfabi
+} // end namespace llvm
+
+#endif // LLVM_TEXTAPI_ELF_TBEHANDLER_H
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
index ebc76bf82118..11d363b1200b 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -202,6 +202,11 @@ Pass *createReversePostOrderFunctionAttrsPass();
ModulePass *createMergeFunctionsPass();
//===----------------------------------------------------------------------===//
+/// createHotColdSplittingPass - This pass outlines cold blocks into a separate
+/// function(s).
+ModulePass *createHotColdSplittingPass();
+
+//===----------------------------------------------------------------------===//
/// createPartialInliningPass - This pass inlines parts of functions.
///
ModulePass *createPartialInliningPass();
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h b/contrib/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h
index dc9f18c79410..901fed7a0fa4 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h
@@ -32,7 +32,8 @@ class Pass;
enum MemoryAccessKind {
MAK_ReadNone = 0,
MAK_ReadOnly = 1,
- MAK_MayWrite = 2
+ MAK_MayWrite = 2,
+ MAK_WriteOnly = 3
};
/// Returns the memory access properties of this copy of the function.
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h
index 120a34e15933..c2103b637266 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -37,13 +37,61 @@ public:
/// containing all the GUIDs of all functions to import for a source module.
using FunctionsToImportTy = std::unordered_set<GlobalValue::GUID>;
+ /// The different reasons selectCallee will chose not to import a
+ /// candidate.
+ enum ImportFailureReason {
+ None,
+ // We can encounter a global variable instead of a function in rare
+ // situations with SamplePGO. See comments where this failure type is
+ // set for more details.
+ GlobalVar,
+ // Found to be globally dead, so we don't bother importing.
+ NotLive,
+ // Instruction count over the current threshold.
+ TooLarge,
+ // Don't import something with interposable linkage as we can't inline it
+ // anyway.
+ InterposableLinkage,
+ // Generally we won't end up failing due to this reason, as we expect
+ // to find at least one summary for the GUID that is global or a local
+ // in the referenced module for direct calls.
+ LocalLinkageNotInModule,
+ // This corresponds to the NotEligibleToImport being set on the summary,
+ // which can happen in a few different cases (e.g. local that can't be
+ // renamed or promoted because it is referenced on a llvm*.used variable).
+ NotEligible,
+ // This corresponds to NoInline being set on the function summary,
+ // which will happen if it is known that the inliner will not be able
+ // to inline the function (e.g. it is marked with a NoInline attribute).
+ NoInline
+ };
+
+ /// Information optionally tracked for candidates the importer decided
+ /// not to import. Used for optional stat printing.
+ struct ImportFailureInfo {
+ // The ValueInfo corresponding to the candidate. We save an index hash
+ // table lookup for each GUID by stashing this here.
+ ValueInfo VI;
+ // The maximum call edge hotness for all failed imports of this candidate.
+ CalleeInfo::HotnessType MaxHotness;
+ // most recent reason for failing to import (doesn't necessarily correspond
+ // to the attempt with the maximum hotness).
+ ImportFailureReason Reason;
+ // The number of times we tried to import candidate but failed.
+ unsigned Attempts;
+ ImportFailureInfo(ValueInfo VI, CalleeInfo::HotnessType MaxHotness,
+ ImportFailureReason Reason, unsigned Attempts)
+ : VI(VI), MaxHotness(MaxHotness), Reason(Reason), Attempts(Attempts) {}
+ };
+
/// Map of callee GUID considered for import into a given module to a pair
/// consisting of the largest threshold applied when deciding whether to
/// import it and, if we decided to import, a pointer to the summary instance
/// imported. If we decided not to import, the summary will be nullptr.
using ImportThresholdsTy =
DenseMap<GlobalValue::GUID,
- std::pair<unsigned, const GlobalValueSummary *>>;
+ std::tuple<unsigned, const GlobalValueSummary *,
+ std::unique_ptr<ImportFailureInfo>>>;
/// The map contains an entry for every module to import from, the key being
/// the module identifier to pass to the ModuleLoader. The value is the set of
@@ -128,6 +176,14 @@ void computeDeadSymbols(
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing);
+/// Compute dead symbols and run constant propagation in combined index
+/// after that.
+void computeDeadSymbolsWithConstProp(
+ ModuleSummaryIndex &Index,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
+ function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing,
+ bool ImportEnabled);
+
/// Converts value \p GV to declaration, or replaces with a declaration if
/// it is an alias. Returns true if converted, false if replaced.
bool convertToDeclaration(GlobalValue &GV);
@@ -153,10 +209,10 @@ std::error_code EmitImportsFiles(
StringRef ModulePath, StringRef OutputFilename,
const std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
-/// Resolve WeakForLinker values in \p TheModule based on the information
+/// Resolve prevailing symbol linkages in \p TheModule based on the information
/// recorded in the summaries during global summary-based analysis.
-void thinLTOResolveWeakForLinkerModule(Module &TheModule,
- const GVSummaryMapTy &DefinedGlobals);
+void thinLTOResolvePrevailingInModule(Module &TheModule,
+ const GVSummaryMapTy &DefinedGlobals);
/// Internalize \p TheModule based on the information recorded in the summaries
/// during global summary-based analysis.
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h b/contrib/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h
new file mode 100644
index 000000000000..57e9a9e69187
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h
@@ -0,0 +1,31 @@
+//===- HotColdSplitting.h ---- Outline Cold Regions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This pass outlines cold regions to a separate function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H
+#define LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to outline cold regions.
+class HotColdSplittingPass : public PassInfoMixin<HotColdSplittingPass> {
+public:
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H
+
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/SampleProfile.h b/contrib/llvm/include/llvm/Transforms/IPO/SampleProfile.h
index cd5a0563898e..af4a933ec1f6 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/SampleProfile.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/SampleProfile.h
@@ -25,13 +25,16 @@ class Module;
/// The sample profiler data loader pass.
class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
public:
- SampleProfileLoaderPass(std::string File = "", bool IsThinLTOPreLink = false)
- : ProfileFileName(File), IsThinLTOPreLink(IsThinLTOPreLink) {}
+ SampleProfileLoaderPass(std::string File = "", std::string RemappingFile = "",
+ bool IsThinLTOPreLink = false)
+ : ProfileFileName(File), ProfileRemappingFileName(RemappingFile),
+ IsThinLTOPreLink(IsThinLTOPreLink) {}
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
private:
std::string ProfileFileName;
+ std::string ProfileRemappingFileName;
bool IsThinLTOPreLink;
};
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index 4a346c8d7450..017cab0a7750 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -24,9 +24,11 @@
namespace llvm {
+class Triple;
class FunctionPass;
class ModulePass;
class OptimizationRemarkEmitter;
+class Comdat;
/// Instrumentation passes often insert conditional checks into entry blocks.
/// Call this function before splitting the entry block to move instructions
@@ -36,6 +38,17 @@ class OptimizationRemarkEmitter;
BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
BasicBlock::iterator IP);
+// Create a constant for Str so that we can pass it to the run-time lib.
+GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
+ bool AllowMerging,
+ const char *NamePrefix = "");
+
+// Returns F.getComdat() if it exists.
+// Otherwise creates a new comdat, sets F's comdat, and returns it.
+// Returns nullptr on failure.
+Comdat *GetOrCreateFunctionComdat(Function &F, Triple &T,
+ const std::string &ModuleId);
+
// Insert GCOV profiling instrumentation
struct GCOVOptions {
static GCOVOptions getDefault();
@@ -64,6 +77,12 @@ struct GCOVOptions {
// Emit the exit block immediately after the start block, rather than after
// all of the function body's blocks.
bool ExitBlockBeforeBody;
+
+ // Regexes separated by a semi-colon to filter the files to instrument.
+ std::string Filter;
+
+ // Regexes separated by a semi-colon to filter the files to not instrument.
+ std::string Exclude;
};
ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
@@ -111,6 +130,9 @@ struct InstrProfOptions {
// Do counter register promotion
bool DoCounterPromotion = false;
+ // Use atomic profile counter increments.
+ bool Atomic = false;
+
// Name of the profile file to use as output
std::string InstrProfileOutput;
@@ -127,18 +149,12 @@ FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
bool UseAfterScope = false);
ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
bool Recover = false,
- bool UseGlobalsGC = true);
-
-// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
-FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
- bool Recover = false);
+ bool UseGlobalsGC = true,
+ bool UseOdrIndicator = true);
FunctionPass *createHWAddressSanitizerPass(bool CompileKernel = false,
bool Recover = false);
-// Insert ThreadSanitizer (race detection) instrumentation
-FunctionPass *createThreadSanitizerPass();
-
// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
ModulePass *createDataFlowSanitizerPass(
const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
@@ -206,7 +222,6 @@ static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
assert(Scaled <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
return Scaled;
}
-
} // end namespace llvm
#endif // LLVM_TRANSFORMS_INSTRUMENTATION_H
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation/ControlHeightReduction.h b/contrib/llvm/include/llvm/Transforms/Instrumentation/ControlHeightReduction.h
new file mode 100644
index 000000000000..460342d1631b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation/ControlHeightReduction.h
@@ -0,0 +1,31 @@
+//===- ControlHeightReduction.h - Control Height Reduction ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass merges conditional blocks of code and reduces the number of
+// conditional branches in the hot paths based on profiles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class ControlHeightReductionPass :
+ public PassInfoMixin<ControlHeightReductionPass> {
+public:
+ ControlHeightReductionPass();
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h b/contrib/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
new file mode 100644
index 000000000000..54f0e2f78230
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
@@ -0,0 +1,48 @@
+//===- Transforms/Instrumentation/MemorySanitizer.h - MSan Pass -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the memory sanitizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerLegacyPassPass(int TrackOrigins = 0,
+ bool Recover = false,
+ bool EnableKmsan = false);
+
+/// A function pass for msan instrumentation.
+///
+/// Instruments functions to detect uninitialized reads. This function pass
+/// inserts calls to runtime library functions. If the functions aren't declared
+/// yet, the pass inserts the declarations. Otherwise the existing globals are
+/// used.
+struct MemorySanitizerPass : public PassInfoMixin<MemorySanitizerPass> {
+ MemorySanitizerPass(int TrackOrigins = 0, bool Recover = false,
+ bool EnableKmsan = false)
+ : TrackOrigins(TrackOrigins), Recover(Recover), EnableKmsan(EnableKmsan) {
+ }
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+
+private:
+ int TrackOrigins;
+ bool Recover;
+ bool EnableKmsan;
+};
+}
+
+#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H */
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
index c0b37c470b74..fdc5df68a669 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
@@ -36,12 +36,14 @@ public:
/// The profile annotation (profile-instr-use) pass for IR based PGO.
class PGOInstrumentationUse : public PassInfoMixin<PGOInstrumentationUse> {
public:
- PGOInstrumentationUse(std::string Filename = "");
+ PGOInstrumentationUse(std::string Filename = "",
+ std::string RemappingFilename = "");
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
private:
std::string ProfileFileName;
+ std::string ProfileRemappingFileName;
};
/// The indirect function call promotion pass.
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h b/contrib/llvm/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
new file mode 100644
index 000000000000..701e2e6ec89e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
@@ -0,0 +1,33 @@
+//===- Transforms/Instrumentation/ThreadSanitizer.h - TSan Pass -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the thread sanitizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerLegacyPassPass();
+
+/// A function pass for tsan instrumentation.
+///
+/// Instruments functions to detect race conditions. This function pass
+/// inserts calls to runtime library functions. If the functions aren't declared
+/// yet, the pass inserts the declarations; otherwise the existing globals are used.
+struct ThreadSanitizerPass : public PassInfoMixin<ThreadSanitizerPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // namespace llvm
+#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H */
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
index 9491e1bbac93..8fcf9296ba47 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -26,7 +26,6 @@ class ModulePass;
class Pass;
class GetElementPtrInst;
class PassInfo;
-class TerminatorInst;
class TargetLowering;
class TargetMachine;
@@ -184,11 +183,12 @@ Pass *createLoopInstSimplifyPass();
//
// LoopUnroll - This pass is a simple loop unrolling pass.
//
-Pass *createLoopUnrollPass(int OptLevel = 2, int Threshold = -1, int Count = -1,
+Pass *createLoopUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false,
+ int Threshold = -1, int Count = -1,
int AllowPartial = -1, int Runtime = -1,
int UpperBound = -1, int AllowPeeling = -1);
// Create an unrolling pass for full unrolling that uses exact trip count only.
-Pass *createSimpleLoopUnrollPass(int OptLevel = 2);
+Pass *createSimpleLoopUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false);
//===----------------------------------------------------------------------===//
//
@@ -394,12 +394,6 @@ FunctionPass *createPartiallyInlineLibCallsPass();
//===----------------------------------------------------------------------===//
//
-// ScalarizerPass - Converts vector operations into scalar operations
-//
-FunctionPass *createScalarizerPass();
-
-//===----------------------------------------------------------------------===//
-//
// SeparateConstOffsetFromGEP - Split GEPs for better CSE
//
FunctionPass *createSeparateConstOffsetFromGEPPass(bool LowerGEP = false);
@@ -477,6 +471,7 @@ FunctionPass *createLoopDataPrefetchPass();
///===---------------------------------------------------------------------===//
ModulePass *createNameAnonGlobalPass();
+ModulePass *createCanonicalizeAliasesPass();
//===----------------------------------------------------------------------===//
//
@@ -491,6 +486,13 @@ FunctionPass *createLibCallsShrinkWrapPass();
// primarily to help other loop passes.
//
Pass *createLoopSimplifyCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// WarnMissedTransformations - This pass emits warnings for leftover forced
+// transformations.
+//
+Pass *createWarnMissedTransformationsPass();
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/ConstantHoisting.h b/contrib/llvm/include/llvm/Transforms/Scalar/ConstantHoisting.h
index 84589bf4db99..ba32e122fa10 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/ConstantHoisting.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/ConstantHoisting.h
@@ -38,6 +38,7 @@
#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
@@ -50,8 +51,10 @@ class BasicBlock;
class BlockFrequencyInfo;
class Constant;
class ConstantInt;
+class ConstantExpr;
class DominatorTree;
class Function;
+class GlobalVariable;
class Instruction;
class TargetTransformInfo;
@@ -74,10 +77,15 @@ using ConstantUseListType = SmallVector<ConstantUser, 8>;
/// Keeps track of a constant candidate and its uses.
struct ConstantCandidate {
ConstantUseListType Uses;
+  // If the candidate is a ConstantExpr (currently only constant GEP expressions
+ // whose base pointers are GlobalVariables are supported), ConstInt records
+ // its offset from the base GV, ConstExpr tracks the candidate GEP expr.
ConstantInt *ConstInt;
+ ConstantExpr *ConstExpr;
unsigned CumulativeCost = 0;
- ConstantCandidate(ConstantInt *ConstInt) : ConstInt(ConstInt) {}
+ ConstantCandidate(ConstantInt *ConstInt, ConstantExpr *ConstExpr=nullptr) :
+ ConstInt(ConstInt), ConstExpr(ConstExpr) {}
/// Add the user to the use list and update the cost.
void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) {
@@ -91,16 +99,21 @@ struct ConstantCandidate {
struct RebasedConstantInfo {
ConstantUseListType Uses;
Constant *Offset;
+ Type *Ty;
- RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset)
- : Uses(std::move(Uses)), Offset(Offset) {}
+ RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset,
+ Type *Ty=nullptr) : Uses(std::move(Uses)), Offset(Offset), Ty(Ty) {}
};
using RebasedConstantListType = SmallVector<RebasedConstantInfo, 4>;
/// A base constant and all its rebased constants.
struct ConstantInfo {
- ConstantInt *BaseConstant;
+  // If the candidate is a ConstantExpr (currently only constant GEP expressions
+ // whose base pointers are GlobalVariables are supported), ConstInt records
+ // its offset from the base GV, ConstExpr tracks the candidate GEP expr.
+ ConstantInt *BaseInt;
+ ConstantExpr *BaseExpr;
RebasedConstantListType RebasedConstants;
};
@@ -115,29 +128,43 @@ public:
BlockFrequencyInfo *BFI, BasicBlock &Entry);
void releaseMemory() {
- ConstantVec.clear();
ClonedCastMap.clear();
- ConstCandVec.clear();
+ ConstIntCandVec.clear();
+ for (auto MapEntry : ConstGEPCandMap)
+ MapEntry.second.clear();
+ ConstGEPCandMap.clear();
+ ConstIntInfoVec.clear();
+ for (auto MapEntry : ConstGEPInfoMap)
+ MapEntry.second.clear();
+ ConstGEPInfoMap.clear();
}
private:
- using ConstCandMapType = DenseMap<ConstantInt *, unsigned>;
- using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
+ using ConstPtrUnionType = PointerUnion<ConstantInt *, ConstantExpr *>;
+ using ConstCandMapType = DenseMap<ConstPtrUnionType, unsigned>;
const TargetTransformInfo *TTI;
DominatorTree *DT;
BlockFrequencyInfo *BFI;
+ LLVMContext *Ctx;
+ const DataLayout *DL;
BasicBlock *Entry;
/// Keeps track of constant candidates found in the function.
- ConstCandVecType ConstCandVec;
+ using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
+ using GVCandVecMapType = DenseMap<GlobalVariable *, ConstCandVecType>;
+ ConstCandVecType ConstIntCandVec;
+ GVCandVecMapType ConstGEPCandMap;
+
+ /// These are the final constants we decided to hoist.
+ using ConstInfoVecType = SmallVector<consthoist::ConstantInfo, 8>;
+ using GVInfoVecMapType = DenseMap<GlobalVariable *, ConstInfoVecType>;
+ ConstInfoVecType ConstIntInfoVec;
+ GVInfoVecMapType ConstGEPInfoMap;
/// Keep track of cast instructions we already cloned.
SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
- /// These are the final constants we decided to hoist.
- SmallVector<consthoist::ConstantInfo, 8> ConstantVec;
-
Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
SmallPtrSet<Instruction *, 8>
findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
@@ -145,19 +172,27 @@ private:
Instruction *Inst, unsigned Idx,
ConstantInt *ConstInt);
void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+ Instruction *Inst, unsigned Idx,
+ ConstantExpr *ConstExpr);
+ void collectConstantCandidates(ConstCandMapType &ConstCandMap,
Instruction *Inst, unsigned Idx);
void collectConstantCandidates(ConstCandMapType &ConstCandMap,
Instruction *Inst);
void collectConstantCandidates(Function &Fn);
void findAndMakeBaseConstant(ConstCandVecType::iterator S,
- ConstCandVecType::iterator E);
+ ConstCandVecType::iterator E,
+ SmallVectorImpl<consthoist::ConstantInfo> &ConstInfoVec);
unsigned maximizeConstantsInRange(ConstCandVecType::iterator S,
ConstCandVecType::iterator E,
ConstCandVecType::iterator &MaxCostItr);
- void findBaseConstants();
- void emitBaseConstants(Instruction *Base, Constant *Offset,
+ // If BaseGV is nullptr, find base among Constant Integer candidates;
+ // otherwise find base among constant GEPs sharing BaseGV as base pointer.
+ void findBaseConstants(GlobalVariable *BaseGV);
+ void emitBaseConstants(Instruction *Base, Constant *Offset, Type *Ty,
const consthoist::ConstantUser &ConstUser);
- bool emitBaseConstants();
+ // If BaseGV is nullptr, emit Constant Integer base; otherwise emit
+ // constant GEP base.
+ bool emitBaseConstants(GlobalVariable *BaseGV);
void deleteDeadCastInst() const;
bool optimizeConstants(Function &Fn);
};
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/GVN.h b/contrib/llvm/include/llvm/Transforms/Scalar/GVN.h
index b9de07ec9279..9827678b89f2 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -22,13 +22,14 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Transforms/Utils/OrderedInstructions.h"
#include <cstdint>
#include <utility>
#include <vector>
@@ -158,11 +159,8 @@ private:
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
OptimizationRemarkEmitter *ORE;
- // Maps a block to the topmost instruction with implicit control flow in it.
- DenseMap<const BasicBlock *, const Instruction *>
- FirstImplicitControlFlowInsts;
+ ImplicitControlFlowTracking *ICF;
- OrderedInstructions *OI;
ValueTable VN;
/// A mapping from value numbers to lists of Value*'s that
@@ -183,7 +181,12 @@ private:
// Map the block to reversed postorder traversal number. It is used to
// find back edge easily.
- DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
+ DenseMap<AssertingVH<BasicBlock>, uint32_t> BlockRPONumber;
+
+ // This is set 'true' initially and also when new blocks have been added to
+ // the function being analyzed. This boolean is used to control the updating
+ // of BlockRPONumber prior to accessing the contents of BlockRPONumber.
+ bool InvalidBlockRPONumbers = true;
using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
@@ -240,7 +243,7 @@ private:
}
// List of critical edges to be split between iterations.
- SmallVector<std::pair<TerminatorInst *, unsigned>, 4> toSplit;
+ SmallVector<std::pair<Instruction *, unsigned>, 4> toSplit;
// Helper functions of redundant load elimination
bool processLoad(LoadInst *L);
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/JumpThreading.h b/contrib/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
index b3493a292498..9894345645a1 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -23,6 +23,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/ValueHandle.h"
#include <memory>
#include <utility>
@@ -34,7 +35,7 @@ class BinaryOperator;
class BranchInst;
class CmpInst;
class Constant;
-class DeferredDominance;
+class DomTreeUpdater;
class Function;
class Instruction;
class IntrinsicInst;
@@ -78,7 +79,7 @@ class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
AliasAnalysis *AA;
- DeferredDominance *DDT;
+ DomTreeUpdater *DTU;
std::unique_ptr<BlockFrequencyInfo> BFI;
std::unique_ptr<BranchProbabilityInfo> BPI;
bool HasProfileData = false;
@@ -88,29 +89,16 @@ class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
#else
SmallSet<AssertingVH<const BasicBlock>, 16> LoopHeaders;
#endif
- DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
unsigned BBDupThreshold;
- // RAII helper for updating the recursion stack.
- struct RecursionSetRemover {
- DenseSet<std::pair<Value *, BasicBlock *>> &TheSet;
- std::pair<Value *, BasicBlock *> ThePair;
-
- RecursionSetRemover(DenseSet<std::pair<Value *, BasicBlock *>> &S,
- std::pair<Value *, BasicBlock *> P)
- : TheSet(S), ThePair(P) {}
-
- ~RecursionSetRemover() { TheSet.erase(ThePair); }
- };
-
public:
JumpThreadingPass(int T = -1);
// Glue for old PM.
bool runImpl(Function &F, TargetLibraryInfo *TLI_, LazyValueInfo *LVI_,
- AliasAnalysis *AA_, DeferredDominance *DDT_,
- bool HasProfileData_, std::unique_ptr<BlockFrequencyInfo> BFI_,
+ AliasAnalysis *AA_, DomTreeUpdater *DTU_, bool HasProfileData_,
+ std::unique_ptr<BlockFrequencyInfo> BFI_,
std::unique_ptr<BranchProbabilityInfo> BPI_);
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
@@ -127,11 +115,21 @@ public:
bool DuplicateCondBranchOnPHIIntoPred(
BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs);
+ bool ComputeValueKnownInPredecessorsImpl(
+ Value *V, BasicBlock *BB, jumpthreading::PredValueInfo &Result,
+ jumpthreading::ConstantPreference Preference,
+ DenseSet<std::pair<Value *, BasicBlock *>> &RecursionSet,
+ Instruction *CxtI = nullptr);
bool
ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
jumpthreading::PredValueInfo &Result,
jumpthreading::ConstantPreference Preference,
- Instruction *CxtI = nullptr);
+ Instruction *CxtI = nullptr) {
+ DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
+ return ComputeValueKnownInPredecessorsImpl(V, BB, Result, Preference,
+ RecursionSet, CxtI);
+ }
+
bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
jumpthreading::ConstantPreference Preference,
Instruction *CxtI = nullptr);
@@ -141,7 +139,11 @@ public:
bool ProcessImpliedCondition(BasicBlock *BB);
bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
+ void UnfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, SelectInst *SI,
+ PHINode *SIUse, unsigned Idx);
+
bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
+ bool TryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB);
bool TryToUnfoldSelectInCurrBB(BasicBlock *BB);
bool ProcessGuards(BasicBlock *BB);
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/contrib/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
index 5f61c39b5530..46ebb74c413c 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -276,7 +276,15 @@ public:
// pass pipeline to put loops into their canonical form. Note that we can
// directly build up function analyses after this as the function pass
// manager handles all the invalidation at that layer.
- PreservedAnalyses PA = LoopCanonicalizationFPM.run(F, AM);
+ PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(F);
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // canonicalization pipeline.
+ if (PI.runBeforePass<Function>(LoopCanonicalizationFPM, F)) {
+ PA = LoopCanonicalizationFPM.run(F, AM);
+ PI.runAfterPass<Function>(LoopCanonicalizationFPM, F);
+ }
// Get the loop structure for this function
LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
@@ -337,8 +345,19 @@ public:
assert(L->isRecursivelyLCSSAForm(LAR.DT, LI) &&
"Loops must remain in LCSSA form!");
#endif
-
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // pass, skip its execution completely if asked to (callback returns
+ // false).
+ if (!PI.runBeforePass<Loop>(Pass, *L))
+ continue;
PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
+
+ // Do not pass deleted Loop into the instrumentation.
+ if (Updater.skipCurrentLoop())
+ PI.runAfterPassInvalidated<Loop>(Pass);
+ else
+ PI.runAfterPass<Loop>(Pass, *L);
+
// FIXME: We should verify the set of analyses relevant to Loop passes
// are preserved.
@@ -364,8 +383,8 @@ public:
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
PA.preserve<ScalarEvolutionAnalysis>();
- // FIXME: Uncomment this when all loop passes preserve MemorySSA
- // PA.preserve<MemorySSAAnalysis>();
+ if (EnableMSSALoopDependency)
+ PA.preserve<MemorySSAAnalysis>();
// FIXME: What we really want to do here is preserve an AA category, but
// that concept doesn't exist yet.
PA.preserve<AAManager>();
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/contrib/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
index 9848e0d54f2b..e38e983cc9eb 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -10,6 +10,7 @@
#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"
@@ -23,23 +24,90 @@ class LPMUpdater;
class LoopFullUnrollPass : public PassInfoMixin<LoopFullUnrollPass> {
const int OptLevel;
+ /// If false, use a cost model to determine whether unrolling of a loop is
+ /// profitable. If true, only loops that explicitly request unrolling via
+ /// metadata are considered. All other loops are skipped.
+ const bool OnlyWhenForced;
+
public:
- explicit LoopFullUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+ explicit LoopFullUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false)
+ : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced) {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
+/// A set of parameters used to control various transforms performed by the
+/// LoopUnroll pass. Each of the boolean parameters can be set to:
+/// true - enabling the transformation.
+/// false - disabling the transformation.
+/// None - relying on a global default.
+///
+/// There is also OptLevel parameter, which is used for additional loop unroll
+/// tuning.
+///
+/// Intended use is to create a default object, modify parameters with
+/// additional setters and then pass it to LoopUnrollPass.
+///
+struct LoopUnrollOptions {
+ Optional<bool> AllowPartial;
+ Optional<bool> AllowPeeling;
+ Optional<bool> AllowRuntime;
+ Optional<bool> AllowUpperBound;
+ int OptLevel;
+
+ /// If false, use a cost model to determine whether unrolling of a loop is
+ /// profitable. If true, only loops that explicitly request unrolling via
+ /// metadata are considered. All other loops are skipped.
+ bool OnlyWhenForced;
+
+ LoopUnrollOptions(int OptLevel = 2, bool OnlyWhenForced = false)
+ : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced) {}
+
+ /// Enables or disables partial unrolling. When disabled only full unrolling
+ /// is allowed.
+ LoopUnrollOptions &setPartial(bool Partial) {
+ AllowPartial = Partial;
+ return *this;
+ }
+
+ /// Enables or disables unrolling of loops with runtime trip count.
+ LoopUnrollOptions &setRuntime(bool Runtime) {
+ AllowRuntime = Runtime;
+ return *this;
+ }
+
+ /// Enables or disables loop peeling.
+ LoopUnrollOptions &setPeeling(bool Peeling) {
+ AllowPeeling = Peeling;
+ return *this;
+ }
+
+ /// Enables or disables the use of trip count upper bound
+ /// in loop unrolling.
+ LoopUnrollOptions &setUpperBound(bool UpperBound) {
+ AllowUpperBound = UpperBound;
+ return *this;
+ }
+
+ // Sets "optimization level" tuning parameter for loop unrolling.
+ LoopUnrollOptions &setOptLevel(int O) {
+ OptLevel = O;
+ return *this;
+ }
+};
+
/// Loop unroll pass that will support both full and partial unrolling.
/// It is a function pass to have access to function and module analyses.
/// It will also put loops into canonical form (simplified and LCSSA).
class LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
- const int OptLevel;
+ LoopUnrollOptions UnrollOpts;
public:
/// This uses the target information (or flags) to control the thresholds for
/// different unrolling stategies but supports all of them.
- explicit LoopUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+ explicit LoopUnrollPass(LoopUnrollOptions UnrollOpts = {})
+ : UnrollOpts(UnrollOpts) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/MakeGuardsExplicit.h b/contrib/llvm/include/llvm/Transforms/Scalar/MakeGuardsExplicit.h
new file mode 100644
index 000000000000..41b4aada2baa
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/MakeGuardsExplicit.h
@@ -0,0 +1,47 @@
+//===-- MakeGuardsExplicit.h - Turn guard intrinsics into guard branches --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the @llvm.experimental.guard intrinsic to the new form of
+// guard represented as widenable explicit branch to the deopt block. The
+// difference between this pass and LowerGuardIntrinsic is that after this pass
+// the guard represented as intrinsic:
+//
+// call void(i1, ...) @llvm.experimental.guard(i1 %old_cond) [ "deopt"() ]
+//
+// transforms to a guard represented as widenable explicit branch:
+//
+// %widenable_cond = call i1 @llvm.experimental.widenable.condition()
+// br i1 (%old_cond & %widenable_cond), label %guarded, label %deopt
+//
+// Here:
+// - The semantics of @llvm.experimental.widenable.condition allows to replace
+// %widenable_cond with the construction (%widenable_cond & %any_other_cond)
+// without loss of correctness;
+// - %guarded is the lower part of old guard intrinsic's parent block split by
+// the intrinsic call;
+// - %deopt is a block containing a sole call to @llvm.experimental.deoptimize
+// intrinsic.
+//
+// Therefore, this branch preserves the property of widenability.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H
+#define LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct MakeGuardsExplicitPass : public PassInfoMixin<MakeGuardsExplicitPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif //LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/SCCP.h b/contrib/llvm/include/llvm/Transforms/Scalar/SCCP.h
index 2a294c95a17b..0abbb32fde6a 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/SCCP.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/SCCP.h
@@ -21,15 +21,17 @@
#ifndef LLVM_TRANSFORMS_SCALAR_SCCP_H
#define LLVM_TRANSFORMS_SCALAR_SCCP_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/PredicateInfo.h"
namespace llvm {
-class Function;
+class PostDominatorTree;
/// This pass performs function-level constant propagation and merging.
class SCCPPass : public PassInfoMixin<SCCPPass> {
@@ -37,7 +39,15 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI);
+/// Helper struct for bundling up the analysis results per function for IPSCCP.
+struct AnalysisResultsForFn {
+ std::unique_ptr<PredicateInfo> PredInfo;
+ DominatorTree *DT;
+ PostDominatorTree *PDT;
+};
+
+bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI,
+ function_ref<AnalysisResultsForFn(Function &)> getAnalysis);
} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_SCCP_H
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/Scalarizer.h b/contrib/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
new file mode 100644
index 000000000000..1a0b9a2b638c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
@@ -0,0 +1,35 @@
+//===- Scalarizer.h --- Scalarize vector operations -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass converts vector operations into scalar operations, in order
+/// to expose optimization opportunities on the individual scalar operations.
+/// It is mainly intended for targets that do not have vector units, but it
+/// may also be useful for revectorizing code to different vector widths.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SCALARIZER_H
+#define LLVM_TRANSFORMS_SCALAR_SCALARIZER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class ScalarizerPass : public PassInfoMixin<ScalarizerPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Create a legacy pass manager instance of the Scalarizer pass
+FunctionPass *createScalarizerPass();
+
+}
+
+#endif /* LLVM_TRANSFORMS_SCALAR_SCALARIZER_H */
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h b/contrib/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h
new file mode 100644
index 000000000000..018b22a932e6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h
@@ -0,0 +1,38 @@
+//===- WarnMissedTransforms.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Emit warnings if forced code transformations have not been performed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H
+#define LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Function;
+class Loop;
+class LPMUpdater;
+
+// New pass manager boilerplate.
+class WarnMissedTransformationsPass
+ : public PassInfoMixin<WarnMissedTransformationsPass> {
+public:
+ explicit WarnMissedTransformationsPass() {}
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+// Legacy pass manager boilerplate.
+Pass *createWarnMissedTransformationsPass();
+void initializeWarnMissedTransformationsLegacyPass(PassRegistry &);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils.h b/contrib/llvm/include/llvm/Transforms/Utils.h
index 0d997ce17b83..378552775c77 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils.h
@@ -113,6 +113,13 @@ extern char &LoopSimplifyID;
/// This function returns a new pass that downgrades the debug info in the
/// module to line tables only.
ModulePass *createStripNonLineTableDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ControlHeightReudction - Merges conditional blocks of code and reduces the
+// number of conditional branches in the hot paths based on profiles.
+//
+FunctionPass *createControlHeightReductionLegacyPass();
}
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 3dfc73b64842..5b16a2c0d0b1 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/InstrTypes.h"
#include <cassert>
@@ -27,19 +28,27 @@ namespace llvm {
class BlockFrequencyInfo;
class BranchProbabilityInfo;
-class DeferredDominance;
class DominatorTree;
+class DomTreeUpdater;
class Function;
class Instruction;
class LoopInfo;
class MDNode;
class MemoryDependenceResults;
+class MemorySSAUpdater;
class ReturnInst;
class TargetLibraryInfo;
class Value;
/// Delete the specified block, which must have no predecessors.
-void DeleteDeadBlock(BasicBlock *BB, DeferredDominance *DDT = nullptr);
+void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
+
+/// Delete the specified blocks from \p BB. The set of deleted blocks must have
+/// no predecessors that are not being deleted themselves. \p BBs must have no
+/// duplicating blocks. If there are loops among this set of blocks, all
+/// relevant loop info updates should be done before this function is called.
+void DeleteDeadBlocks(SmallVectorImpl <BasicBlock *> &BBs,
+ DomTreeUpdater *DTU = nullptr);
/// We know that BB has one predecessor. If there are any single-entry PHI nodes
/// in it, fold them away. This handles the case when all entries to the PHI
@@ -56,10 +65,10 @@ bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// Attempts to merge a block into its predecessor, if possible. The return
/// value indicates success or failure.
-bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
LoopInfo *LI = nullptr,
- MemoryDependenceResults *MemDep = nullptr,
- DeferredDominance *DDT = nullptr);
+ MemorySSAUpdater *MSSAU = nullptr,
+ MemoryDependenceResults *MemDep = nullptr);
/// Replace all uses of an instruction (specified by BI) with a value, then
/// remove and delete the original instruction.
@@ -84,13 +93,15 @@ void ReplaceInstWithInst(Instruction *From, Instruction *To);
struct CriticalEdgeSplittingOptions {
DominatorTree *DT;
LoopInfo *LI;
+ MemorySSAUpdater *MSSAU;
bool MergeIdenticalEdges = false;
bool DontDeleteUselessPHIs = false;
bool PreserveLCSSA = false;
CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
- LoopInfo *LI = nullptr)
- : DT(DT), LI(LI) {}
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr)
+ : DT(DT), LI(LI), MSSAU(MSSAU) {}
CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
MergeIdenticalEdges = true;
@@ -124,7 +135,7 @@ struct CriticalEdgeSplittingOptions {
/// IndirectBrInst. Splitting these edges will almost always create an invalid
/// program because the address of the new block won't be the one that is jumped
/// to.
-BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
+BasicBlock *SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
const CriticalEdgeSplittingOptions &Options =
CriticalEdgeSplittingOptions());
@@ -144,7 +155,7 @@ inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
const CriticalEdgeSplittingOptions &Options =
CriticalEdgeSplittingOptions()) {
bool MadeChange = false;
- TerminatorInst *TI = (*PI)->getTerminator();
+ Instruction *TI = (*PI)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
if (TI->getSuccessor(i) == Succ)
MadeChange |= !!SplitCriticalEdge(TI, i, Options);
@@ -158,7 +169,7 @@ inline BasicBlock *
SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
const CriticalEdgeSplittingOptions &Options =
CriticalEdgeSplittingOptions()) {
- TerminatorInst *TI = Src->getTerminator();
+ Instruction *TI = Src->getTerminator();
unsigned i = 0;
while (true) {
assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
@@ -176,14 +187,16 @@ unsigned SplitAllCriticalEdges(Function &F,
/// Split the edge connecting specified block.
BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
- DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
/// Split the specified block at the specified instruction - everything before
/// SplitPt stays in Old and everything starting with SplitPt moves to a new
/// block. The two blocks are joined by an unconditional branch and the loop
/// info is updated.
BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
- DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
/// This method introduces at least one new basic block into the function and
/// moves some of the predecessors of BB to be predecessors of the new block.
@@ -203,6 +216,7 @@ BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
const char *Suffix,
DominatorTree *DT = nullptr,
LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
bool PreserveLCSSA = false);
/// This method transforms the landing pad, OrigBB, by introducing two new basic
@@ -216,20 +230,19 @@ BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
/// no other analyses. In particular, it does not preserve LoopSimplify
/// (because it's complicated to handle the case where one of the edges being
/// split is an exit of a loop with other exits).
-void SplitLandingPadPredecessors(BasicBlock *OrigBB,
- ArrayRef<BasicBlock *> Preds,
- const char *Suffix, const char *Suffix2,
- SmallVectorImpl<BasicBlock *> &NewBBs,
- DominatorTree *DT = nullptr,
- LoopInfo *LI = nullptr,
- bool PreserveLCSSA = false);
+void SplitLandingPadPredecessors(
+ BasicBlock *OrigBB, ArrayRef<BasicBlock *> Preds, const char *Suffix,
+ const char *Suffix2, SmallVectorImpl<BasicBlock *> &NewBBs,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);
/// This method duplicates the specified return instruction into a predecessor
/// which ends in an unconditional branch. If the return instruction returns a
/// value defined by a PHI, propagate the right value into the return. It
/// returns the new return instruction in the predecessor.
ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
- BasicBlock *Pred);
+ BasicBlock *Pred,
+ DomTreeUpdater *DTU = nullptr);
/// Split the containing block at the specified instruction - everything before
/// SplitBefore stays in the old basic block, and the rest of the instructions
@@ -251,11 +264,11 @@ ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
/// Returns the NewBasicBlock's terminator.
///
/// Updates DT and LI if given.
-TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
- bool Unreachable,
- MDNode *BranchWeights = nullptr,
- DominatorTree *DT = nullptr,
- LoopInfo *LI = nullptr);
+Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+ bool Unreachable,
+ MDNode *BranchWeights = nullptr,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr);
/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
/// but also creates the ElseBlock.
@@ -272,8 +285,8 @@ TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
/// SplitBefore
/// Tail
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
- TerminatorInst **ThenTerm,
- TerminatorInst **ElseTerm,
+ Instruction **ThenTerm,
+ Instruction **ElseTerm,
MDNode *BranchWeights = nullptr);
/// Check whether BB is the merge point of a if-region.
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index eafe07f49284..28efce6ac3fb 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -37,6 +37,12 @@ namespace llvm {
LibFunc DoubleFn, LibFunc FloatFn,
LibFunc LongDoubleFn);
+ /// Get the name of the overloaded unary floating point function
+ /// corresponding to \a Ty.
+ StringRef getUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+ LibFunc DoubleFn, LibFunc FloatFn,
+ LibFunc LongDoubleFn);
+
/// Return V if it is an i8*, otherwise cast it to i8*.
Value *castToCStr(Value *V, IRBuilder<> &B);
@@ -94,6 +100,13 @@ namespace llvm {
Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
const AttributeList &Attrs);
+ /// Emit a call to the unary function DoubleFn, FloatFn or LongDoubleFn,
+ /// depending of the type of Op.
+ Value *emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI,
+ LibFunc DoubleFn, LibFunc FloatFn,
+ LibFunc LongDoubleFn, IRBuilder<> &B,
+ const AttributeList &Attrs);
+
/// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
/// function is known to take type matching 'Op1' and 'Op2' and return one
/// value with the same type. If 'Op1/Op2' are long double, 'l' is added as
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CanonicalizeAliases.h b/contrib/llvm/include/llvm/Transforms/Utils/CanonicalizeAliases.h
new file mode 100644
index 000000000000..f23263783fec
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CanonicalizeAliases.h
@@ -0,0 +1,32 @@
+//===-- CanonicalizeAliases.h - Alias Canonicalization Pass -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file canonicalizes aliases.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+#define LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that canonicalizes aliases.
+class CanonicalizeAliasesPass : public PassInfoMixin<CanonicalizeAliasesPass> {
+public:
+ CanonicalizeAliasesPass() = default;
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASESH
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
index 7531fb2d69b3..f5e997324fc8 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
@@ -46,9 +47,9 @@ class LoopInfo;
class Module;
class ProfileSummaryInfo;
class ReturnInst;
+class DomTreeUpdater;
/// Return an exact copy of the specified module
-///
std::unique_ptr<Module> CloneModule(const Module &M);
std::unique_ptr<Module> CloneModule(const Module &M, ValueToValueMapTy &VMap);
@@ -60,17 +61,15 @@ std::unique_ptr<Module>
CloneModule(const Module &M, ValueToValueMapTy &VMap,
function_ref<bool(const GlobalValue *)> ShouldCloneDefinition);
-/// ClonedCodeInfo - This struct can be used to capture information about code
+/// This struct can be used to capture information about code
/// being cloned, while it is being cloned.
struct ClonedCodeInfo {
- /// ContainsCalls - This is set to true if the cloned code contains a normal
- /// call instruction.
+ /// This is set to true if the cloned code contains a normal call instruction.
bool ContainsCalls = false;
- /// ContainsDynamicAllocas - This is set to true if the cloned code contains
- /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
- /// the entry block or they are in the entry block but are not a constant
- /// size.
+ /// This is set to true if the cloned code contains a 'dynamic' alloca.
+ /// Dynamic allocas are allocas that are either not in the entry block or they
+ /// are in the entry block but are not a constant size.
bool ContainsDynamicAllocas = false;
/// All cloned call sites that have operand bundles attached are appended to
@@ -81,7 +80,7 @@ struct ClonedCodeInfo {
ClonedCodeInfo() = default;
};
-/// CloneBasicBlock - Return a copy of the specified basic block, but without
+/// Return a copy of the specified basic block, but without
/// embedding the block into a particular function. The block returned is an
/// exact copy of the specified basic block, without any remapping having been
/// performed. Because of this, this is only suitable for applications where
@@ -108,13 +107,12 @@ struct ClonedCodeInfo {
/// If you would like to collect additional information about the cloned
/// function, you can specify a ClonedCodeInfo object with the optional fifth
/// parameter.
-///
BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
const Twine &NameSuffix = "", Function *F = nullptr,
ClonedCodeInfo *CodeInfo = nullptr,
DebugInfoFinder *DIFinder = nullptr);
-/// CloneFunction - Return a copy of the specified function and add it to that
+/// Return a copy of the specified function and add it to that
/// function's module. Also, any references specified in the VMap are changed
/// to refer to their mapped value instead of the original one. If any of the
/// arguments to the function are in the VMap, the arguments are deleted from
@@ -153,7 +151,7 @@ void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr);
-/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
+/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
@@ -171,8 +169,8 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
ClonedCodeInfo *CodeInfo = nullptr,
Instruction *TheCall = nullptr);
-/// InlineFunctionInfo - This class captures the data input to the
-/// InlineFunction call, and records the auxiliary results produced by it.
+/// This class captures the data input to the InlineFunction call, and records
+/// the auxiliary results produced by it.
class InlineFunctionInfo {
public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
@@ -184,19 +182,19 @@ public:
: CG(cg), GetAssumptionCache(GetAssumptionCache), PSI(PSI),
CallerBFI(CallerBFI), CalleeBFI(CalleeBFI) {}
- /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+ /// If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
std::function<AssumptionCache &(Function &)> *GetAssumptionCache;
ProfileSummaryInfo *PSI;
BlockFrequencyInfo *CallerBFI, *CalleeBFI;
- /// StaticAllocas - InlineFunction fills this in with all static allocas that
- /// get copied into the caller.
+ /// InlineFunction fills this in with all static allocas that get copied into
+ /// the caller.
SmallVector<AllocaInst *, 4> StaticAllocas;
- /// InlinedCalls - InlineFunction fills this in with callsites that were
- /// inlined from the callee. This is only filled in if CG is non-null.
+ /// InlineFunction fills this in with callsites that were inlined from the
+ /// callee. This is only filled in if CG is non-null.
SmallVector<WeakTrackingVH, 8> InlinedCalls;
/// All of the new call sites inlined into the caller.
@@ -213,7 +211,7 @@ public:
}
};
-/// InlineFunction - This function inlines the called function into the basic
+/// This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
@@ -232,13 +230,16 @@ public:
/// and all varargs at the callsite will be passed to any calls to
/// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
/// are only used by ForwardVarArgsTo.
-bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
- AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
-bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
- AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
-bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
- AAResults *CalleeAAR = nullptr, bool InsertLifetime = true,
- Function *ForwardVarArgsTo = nullptr);
+InlineResult InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr,
+ bool InsertLifetime = true);
+InlineResult InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr,
+ bool InsertLifetime = true);
+InlineResult InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr,
+ bool InsertLifetime = true,
+ Function *ForwardVarArgsTo = nullptr);
/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
@@ -262,11 +263,12 @@ void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
/// we replace them with the uses of corresponding Phi inputs. ValueMapping
/// is used to map the original instructions from BB to their newly-created
/// copies. Returns the split block.
-BasicBlock *
-DuplicateInstructionsInSplitBetween(BasicBlock *BB, BasicBlock *PredBB,
- Instruction *StopAt,
- ValueToValueMapTy &ValueMapping,
- DominatorTree *DT = nullptr);
+BasicBlock *DuplicateInstructionsInSplitBetween(BasicBlock *BB,
+ BasicBlock *PredBB,
+ Instruction *StopAt,
+ ValueToValueMapTy &ValueMapping,
+ DomTreeUpdater &DTU);
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_CLONING_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
index 0e5254acb0d3..fee79fdc3bff 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include <limits>
namespace llvm {
@@ -26,6 +27,7 @@ class BasicBlock;
class BlockFrequency;
class BlockFrequencyInfo;
class BranchProbabilityInfo;
+class CallInst;
class DominatorTree;
class Function;
class Instruction;
@@ -64,6 +66,11 @@ class Value;
unsigned NumExitBlocks = std::numeric_limits<unsigned>::max();
Type *RetTy;
+ // Suffix to use when creating extracted function (appended to the original
+ // function name + "."). If empty, the default is to use the entry block
+ // label, if non-empty, otherwise "extracted".
+ std::string Suffix;
+
public:
/// Create a code extractor for a sequence of blocks.
///
@@ -78,7 +85,8 @@ class Value;
CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
BranchProbabilityInfo *BPI = nullptr,
- bool AllowVarArgs = false, bool AllowAlloca = false);
+ bool AllowVarArgs = false, bool AllowAlloca = false,
+ std::string Suffix = "");
/// Create a code extractor for a loop body.
///
@@ -86,7 +94,8 @@ class Value;
/// block sequence of the loop.
CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
BlockFrequencyInfo *BFI = nullptr,
- BranchProbabilityInfo *BPI = nullptr);
+ BranchProbabilityInfo *BPI = nullptr,
+ std::string Suffix = "");
/// Perform the extraction, returning the new function.
///
@@ -139,7 +148,8 @@ class Value;
BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
private:
- void severSplitPHINodes(BasicBlock *&Header);
+ void severSplitPHINodesOfEntry(BasicBlock *&Header);
+ void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
void splitReturnBlocks();
Function *constructFunction(const ValueSet &inputs,
@@ -155,10 +165,9 @@ class Value;
DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
BranchProbabilityInfo *BPI);
- void emitCallAndSwitchStatement(Function *newFunction,
- BasicBlock *newHeader,
- ValueSet &inputs,
- ValueSet &outputs);
+ CallInst *emitCallAndSwitchStatement(Function *newFunction,
+ BasicBlock *newHeader,
+ ValueSet &inputs, ValueSet &outputs);
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/FunctionImportUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/FunctionImportUtils.h
index b9fbef04cdc3..e24398b90012 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/FunctionImportUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -114,6 +114,9 @@ bool renameModuleForThinLTO(
Module &M, const ModuleSummaryIndex &Index,
SetVector<GlobalValue *> *GlobalsToImport = nullptr);
+/// Compute synthetic function entry counts.
+void computeSyntheticCounts(ModuleSummaryIndex &Index);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/GuardUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/GuardUtils.h
new file mode 100644
index 000000000000..537045edafe4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/GuardUtils.h
@@ -0,0 +1,30 @@
+//===-- GuardUtils.h - Utils for work with guards ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Utils that are used to perform transformations related to guards and their
+// conditions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
+#define LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
+
+namespace llvm {
+
+class CallInst;
+class Function;
+
+/// Splits control flow at point of \p Guard, replacing it with explicit branch
+/// by the condition of guard's first argument. The taken branch then goes to
+/// the block that contains \p Guard's successors, and the non-taken branch
+/// goes to a newly-created deopt block that contains a sole call of the
+/// deoptimize function \p DeoptIntrinsic.
+void makeGuardControlFlowExplicit(Function *DeoptIntrinsic, CallInst *Guard);
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
index b8df32565723..ec8b0eda3641 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -26,6 +26,7 @@
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
@@ -43,7 +44,7 @@ class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallInst;
-class DbgInfoIntrinsic;
+class DbgVariableIntrinsic;
class DbgValueInst;
class DIBuilder;
class Function;
@@ -51,6 +52,7 @@ class Instruction;
class LazyValueInfo;
class LoadInst;
class MDNode;
+class MemorySSAUpdater;
class PHINode;
class StoreInst;
class TargetLibraryInfo;
@@ -120,7 +122,7 @@ struct SimplifyCFGOptions {
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
const TargetLibraryInfo *TLI = nullptr,
- DeferredDominance *DDT = nullptr);
+ DomTreeUpdater *DTU = nullptr);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@@ -140,8 +142,9 @@ bool wouldInstructionBeTriviallyDead(Instruction *I,
/// If the specified value is a trivially dead instruction, delete it.
/// If that makes any of its operands trivially dead, delete them too,
/// recursively. Return true if any instructions were deleted.
-bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
- const TargetLibraryInfo *TLI = nullptr);
+bool RecursivelyDeleteTriviallyDeadInstructions(
+ Value *V, const TargetLibraryInfo *TLI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
/// Delete all of the instructions in `DeadInsts`, and all other instructions
/// that deleting these in turn causes to be trivially dead.
@@ -153,7 +156,7 @@ bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
/// empty afterward.
void RecursivelyDeleteTriviallyDeadInstructions(
SmallVectorImpl<Instruction *> &DeadInsts,
- const TargetLibraryInfo *TLI = nullptr);
+ const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr);
/// If the specified value is an effectively dead PHI node, due to being a
/// def-use chain of single-use nodes that either forms a cycle or is terminated
@@ -171,6 +174,12 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN,
bool SimplifyInstructionsInBlock(BasicBlock *BB,
const TargetLibraryInfo *TLI = nullptr);
+/// Replace all the uses of an SSA value in @llvm.dbg intrinsics with
+/// undef. This is useful for signaling that a variable, e.g. has been
+/// found dead and hence it's unavailable at a given program point.
+/// Returns true if the dbg values have been changed.
+bool replaceDbgUsesWithUndef(Instruction *I);
+
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
//
@@ -187,20 +196,19 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB,
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- DeferredDominance *DDT = nullptr);
+ DomTreeUpdater *DTU = nullptr);
/// BB is a block with one predecessor and its predecessor is known to have one
/// successor (BB!). Eliminate the edge between them, moving the instructions in
/// the predecessor into BB. This deletes the predecessor block.
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr,
- DeferredDominance *DDT = nullptr);
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
/// BB is known to contain an unconditional branch, and contains no instructions
/// other than PHI nodes, potential debug intrinsics and the branch. If
/// possible, eliminate BB by rewriting all the predecessors to branch to the
/// successor block and return true. If we can't transform, return false.
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
- DeferredDominance *DDT = nullptr);
+ DomTreeUpdater *DTU = nullptr);
/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
/// to be clever about PHI nodes which differ only in the order of the incoming
@@ -270,17 +278,17 @@ inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
StoreInst *SI, DIBuilder &Builder);
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
LoadInst *LI, DIBuilder &Builder);
/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
-void ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
PHINode *LI, DIBuilder &Builder);
/// Lowers llvm.dbg.declare intrinsics into appropriate set of
@@ -294,13 +302,13 @@ void insertDebugValuesForPHIs(BasicBlock *BB,
/// Finds all intrinsics declaring local variables as living in the memory that
/// 'V' points to. This may include a mix of dbg.declare and
/// dbg.addr intrinsics.
-TinyPtrVector<DbgInfoIntrinsic *> FindDbgAddrUses(Value *V);
+TinyPtrVector<DbgVariableIntrinsic *> FindDbgAddrUses(Value *V);
/// Finds the llvm.dbg.value intrinsics describing a value.
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
/// Finds the debug info intrinsics describing a value.
-void findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgInsts, Value *V);
+void findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgInsts, Value *V);
/// Replaces llvm.dbg.declare instruction when the address it
/// describes is replaced with a new value. If Deref is true, an
@@ -359,7 +367,7 @@ unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
/// instruction, making it and the rest of the code in the block dead.
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
bool PreserveLCSSA = false,
- DeferredDominance *DDT = nullptr);
+ DomTreeUpdater *DTU = nullptr);
/// Convert the CallInst to InvokeInst with the specified unwind edge basic
/// block. This also splits the basic block where CI is located, because
@@ -374,24 +382,36 @@ BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
///
/// \param BB Block whose terminator will be replaced. Its terminator must
/// have an unwind successor.
-void removeUnwindEdge(BasicBlock *BB, DeferredDominance *DDT = nullptr);
+void removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
/// Remove all blocks that can not be reached from the function's entry.
///
/// Returns true if any basic block was removed.
bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
- DeferredDominance *DDT = nullptr);
+ DomTreeUpdater *DTU = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
-/// Combine the metadata of two instructions so that K can replace J
+/// Combine the metadata of two instructions so that K can replace J. Some
+/// metadata kinds can only be kept if K does not move, meaning it dominated
+/// J in the original IR.
///
/// Metadata not listed as known via KnownIDs is removed
-void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
+void combineMetadata(Instruction *K, const Instruction *J,
+ ArrayRef<unsigned> KnownIDs, bool DoesKMove);
/// Combine the metadata of two instructions so that K can replace J. This
-/// specifically handles the case of CSE-like transformations.
+/// specifically handles the case of CSE-like transformations. Some
+/// metadata can only be kept if K dominates J. For this to be correct,
+/// K cannot be hoisted.
///
/// Unknown metadata is removed.
-void combineMetadataForCSE(Instruction *K, const Instruction *J);
+void combineMetadataForCSE(Instruction *K, const Instruction *J,
+ bool DoesKMove);
+
+/// Patch the replacement so that it is not more restrictive than the value
+/// being replaced. It assumes that the replacement does not get moved from
+/// its original position.
+void patchReplacementInstruction(Instruction *I, Value *Repl);
// Replace each use of 'From' with 'To', if that use does not belong to basic
// block where 'From' is defined. Returns the number of replacements made.
@@ -429,6 +449,18 @@ void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
LoadInst &NewLI);
+/// Remove the debug intrinsic instructions for the given instruction.
+void dropDebugUsers(Instruction &I);
+
+/// Hoist all of the instructions in the \p IfBlock to the dominant block
+/// \p DomBlock, by moving its instructions to the insertion point \p InsertPt.
+///
+/// The moved instructions receive the insertion point debug location values
+/// (DILocations) and their debug intrinsic instructions (dbg.values) are
+/// removed.
+void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
+ BasicBlock *BB);
+
//===----------------------------------------------------------------------===//
// Intrinsic pattern matching
//
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
index 231e5bbb6dee..cd5bc4301018 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/LoopRotationUtils.h
@@ -20,6 +20,7 @@ class AssumptionCache;
class DominatorTree;
class Loop;
class LoopInfo;
+class MemorySSAUpdater;
class ScalarEvolution;
struct SimplifyQuery;
class TargetTransformInfo;
@@ -32,8 +33,8 @@ class TargetTransformInfo;
/// LoopRotation. If it is true, the profitability heuristic will be ignored.
bool LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
AssumptionCache *AC, DominatorTree *DT, ScalarEvolution *SE,
- const SimplifyQuery &SQ, bool RotationOnly,
- unsigned Threshold, bool IsUtilMode);
+ MemorySSAUpdater *MSSAU, const SimplifyQuery &SQ,
+ bool RotationOnly, unsigned Threshold, bool IsUtilMode);
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index eb4c99102a63..8c2527b6ae68 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -23,6 +23,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
@@ -40,6 +41,7 @@ class BasicBlock;
class DataLayout;
class Loop;
class LoopInfo;
+class MemorySSAUpdater;
class OptimizationRemarkEmitter;
class PredicatedScalarEvolution;
class PredIteratorCache;
@@ -48,318 +50,6 @@ class SCEV;
class TargetLibraryInfo;
class TargetTransformInfo;
-
-/// The RecurrenceDescriptor is used to identify recurrences variables in a
-/// loop. Reduction is a special case of recurrence that has uses of the
-/// recurrence variable outside the loop. The method isReductionPHI identifies
-/// reductions that are basic recurrences.
-///
-/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
-/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
-/// array[i]; } is a summation of array elements. Basic recurrences are a
-/// special case of chains of recurrences (CR). See ScalarEvolution for CR
-/// references.
-
-/// This struct holds information about recurrence variables.
-class RecurrenceDescriptor {
-public:
- /// This enum represents the kinds of recurrences that we support.
- enum RecurrenceKind {
- RK_NoRecurrence, ///< Not a recurrence.
- RK_IntegerAdd, ///< Sum of integers.
- RK_IntegerMult, ///< Product of integers.
- RK_IntegerOr, ///< Bitwise or logical OR of numbers.
- RK_IntegerAnd, ///< Bitwise or logical AND of numbers.
- RK_IntegerXor, ///< Bitwise or logical XOR of numbers.
- RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
- RK_FloatAdd, ///< Sum of floats.
- RK_FloatMult, ///< Product of floats.
- RK_FloatMinMax ///< Min/max implemented in terms of select(cmp()).
- };
-
- // This enum represents the kind of minmax recurrence.
- enum MinMaxRecurrenceKind {
- MRK_Invalid,
- MRK_UIntMin,
- MRK_UIntMax,
- MRK_SIntMin,
- MRK_SIntMax,
- MRK_FloatMin,
- MRK_FloatMax
- };
-
- RecurrenceDescriptor() = default;
-
- RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
- MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
- bool Signed, SmallPtrSetImpl<Instruction *> &CI)
- : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
- UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
- CastInsts.insert(CI.begin(), CI.end());
- }
-
- /// This POD struct holds information about a potential recurrence operation.
- class InstDesc {
- public:
- InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
- : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
- UnsafeAlgebraInst(UAI) {}
-
- InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
- : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
- UnsafeAlgebraInst(UAI) {}
-
- bool isRecurrence() { return IsRecurrence; }
-
- bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
-
- Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
-
- MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
-
- Instruction *getPatternInst() { return PatternLastInst; }
-
- private:
- // Is this instruction a recurrence candidate.
- bool IsRecurrence;
- // The last instruction in a min/max pattern (select of the select(icmp())
- // pattern), or the current recurrence instruction otherwise.
- Instruction *PatternLastInst;
- // If this is a min/max pattern the comparison predicate.
- MinMaxRecurrenceKind MinMaxKind;
- // Recurrence has unsafe algebra.
- Instruction *UnsafeAlgebraInst;
- };
-
- /// Returns a struct describing if the instruction 'I' can be a recurrence
- /// variable of type 'Kind'. If the recurrence is a min/max pattern of
- /// select(icmp()) this function advances the instruction pointer 'I' from the
- /// compare instruction to the select instruction and stores this pointer in
- /// 'PatternLastInst' member of the returned struct.
- static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
- InstDesc &Prev, bool HasFunNoNaNAttr);
-
- /// Returns true if instruction I has multiple uses in Insts
- static bool hasMultipleUsesOf(Instruction *I,
- SmallPtrSetImpl<Instruction *> &Insts);
-
- /// Returns true if all uses of the instruction I is within the Set.
- static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
-
- /// Returns a struct describing if the instruction if the instruction is a
- /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
- /// or max(X, Y).
- static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
-
- /// Returns identity corresponding to the RecurrenceKind.
- static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
-
- /// Returns the opcode of binary operation corresponding to the
- /// RecurrenceKind.
- static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
-
- /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
- static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK,
- Value *Left, Value *Right);
-
- /// Returns true if Phi is a reduction of type Kind and adds it to the
- /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
- /// non-null, the minimal bit width needed to compute the reduction will be
- /// computed.
- static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
- bool HasFunNoNaNAttr,
- RecurrenceDescriptor &RedDes,
- DemandedBits *DB = nullptr,
- AssumptionCache *AC = nullptr,
- DominatorTree *DT = nullptr);
-
- /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor
- /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
- /// non-null, the minimal bit width needed to compute the reduction will be
- /// computed.
- static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
- RecurrenceDescriptor &RedDes,
- DemandedBits *DB = nullptr,
- AssumptionCache *AC = nullptr,
- DominatorTree *DT = nullptr);
-
- /// Returns true if Phi is a first-order recurrence. A first-order recurrence
- /// is a non-reduction recurrence relation in which the value of the
- /// recurrence in the current loop iteration equals a value defined in the
- /// previous iteration. \p SinkAfter includes pairs of instructions where the
- /// first will be rescheduled to appear after the second if/when the loop is
- /// vectorized. It may be augmented with additional pairs if needed in order
- /// to handle Phi as a first-order recurrence.
- static bool
- isFirstOrderRecurrence(PHINode *Phi, Loop *TheLoop,
- DenseMap<Instruction *, Instruction *> &SinkAfter,
- DominatorTree *DT);
-
- RecurrenceKind getRecurrenceKind() { return Kind; }
-
- MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
-
- TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
-
- Instruction *getLoopExitInstr() { return LoopExitInstr; }
-
- /// Returns true if the recurrence has unsafe algebra which requires a relaxed
- /// floating-point model.
- bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
-
- /// Returns first unsafe algebra instruction in the PHI node's use-chain.
- Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
-
- /// Returns true if the recurrence kind is an integer kind.
- static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
-
- /// Returns true if the recurrence kind is a floating point kind.
- static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
-
- /// Returns true if the recurrence kind is an arithmetic kind.
- static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
-
- /// Returns the type of the recurrence. This type can be narrower than the
- /// actual type of the Phi if the recurrence has been type-promoted.
- Type *getRecurrenceType() { return RecurrenceType; }
-
- /// Returns a reference to the instructions used for type-promoting the
- /// recurrence.
- SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
-
- /// Returns true if all source operands of the recurrence are SExtInsts.
- bool isSigned() { return IsSigned; }
-
-private:
- // The starting value of the recurrence.
- // It does not have to be zero!
- TrackingVH<Value> StartValue;
- // The instruction who's value is used outside the loop.
- Instruction *LoopExitInstr = nullptr;
- // The kind of the recurrence.
- RecurrenceKind Kind = RK_NoRecurrence;
- // If this a min/max recurrence the kind of recurrence.
- MinMaxRecurrenceKind MinMaxKind = MRK_Invalid;
- // First occurrence of unasfe algebra in the PHI's use-chain.
- Instruction *UnsafeAlgebraInst = nullptr;
- // The type of the recurrence.
- Type *RecurrenceType = nullptr;
- // True if all source operands of the recurrence are SExtInsts.
- bool IsSigned = false;
- // Instructions used for type-promoting the recurrence.
- SmallPtrSet<Instruction *, 8> CastInsts;
-};
-
-/// A struct for saving information about induction variables.
-class InductionDescriptor {
-public:
- /// This enum represents the kinds of inductions that we support.
- enum InductionKind {
- IK_NoInduction, ///< Not an induction variable.
- IK_IntInduction, ///< Integer induction variable. Step = C.
- IK_PtrInduction, ///< Pointer induction var. Step = C / sizeof(elem).
- IK_FpInduction ///< Floating point induction variable.
- };
-
-public:
- /// Default constructor - creates an invalid induction.
- InductionDescriptor() = default;
-
- /// Get the consecutive direction. Returns:
- /// 0 - unknown or non-consecutive.
- /// 1 - consecutive and increasing.
- /// -1 - consecutive and decreasing.
- int getConsecutiveDirection() const;
-
- /// Compute the transformed value of Index at offset StartValue using step
- /// StepValue.
- /// For integer induction, returns StartValue + Index * StepValue.
- /// For pointer induction, returns StartValue[Index * StepValue].
- /// FIXME: The newly created binary instructions should contain nsw/nuw
- /// flags, which can be found from the original scalar operations.
- Value *transform(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
- const DataLayout& DL) const;
-
- Value *getStartValue() const { return StartValue; }
- InductionKind getKind() const { return IK; }
- const SCEV *getStep() const { return Step; }
- ConstantInt *getConstIntStepValue() const;
-
- /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
- /// induction, the induction descriptor \p D will contain the data describing
- /// this induction. If by some other means the caller has a better SCEV
- /// expression for \p Phi than the one returned by the ScalarEvolution
- /// analysis, it can be passed through \p Expr. If the def-use chain
- /// associated with the phi includes casts (that we know we can ignore
- /// under proper runtime checks), they are passed through \p CastsToIgnore.
- static bool
- isInductionPHI(PHINode *Phi, const Loop* L, ScalarEvolution *SE,
- InductionDescriptor &D, const SCEV *Expr = nullptr,
- SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);
-
- /// Returns true if \p Phi is a floating point induction in the loop \p L.
- /// If \p Phi is an induction, the induction descriptor \p D will contain
- /// the data describing this induction.
- static bool isFPInductionPHI(PHINode *Phi, const Loop* L,
- ScalarEvolution *SE, InductionDescriptor &D);
-
- /// Returns true if \p Phi is a loop \p L induction, in the context associated
- /// with the run-time predicate of PSE. If \p Assume is true, this can add
- /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
- /// induction.
- /// If \p Phi is an induction, \p D will contain the data describing this
- /// induction.
- static bool isInductionPHI(PHINode *Phi, const Loop* L,
- PredicatedScalarEvolution &PSE,
- InductionDescriptor &D, bool Assume = false);
-
- /// Returns true if the induction type is FP and the binary operator does
- /// not have the "fast-math" property. Such operation requires a relaxed FP
- /// mode.
- bool hasUnsafeAlgebra() {
- return InductionBinOp && !cast<FPMathOperator>(InductionBinOp)->isFast();
- }
-
- /// Returns induction operator that does not have "fast-math" property
- /// and requires FP unsafe mode.
- Instruction *getUnsafeAlgebraInst() {
- if (!InductionBinOp || cast<FPMathOperator>(InductionBinOp)->isFast())
- return nullptr;
- return InductionBinOp;
- }
-
- /// Returns binary opcode of the induction operator.
- Instruction::BinaryOps getInductionOpcode() const {
- return InductionBinOp ? InductionBinOp->getOpcode() :
- Instruction::BinaryOpsEnd;
- }
-
- /// Returns a reference to the type cast instructions in the induction
- /// update chain, that are redundant when guarded with a runtime
- /// SCEV overflow check.
- const SmallVectorImpl<Instruction *> &getCastInsts() const {
- return RedundantCasts;
- }
-
-private:
- /// Private constructor - used by \c isInductionPHI.
- InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
- BinaryOperator *InductionBinOp = nullptr,
- SmallVectorImpl<Instruction *> *Casts = nullptr);
-
- /// Start value.
- TrackingVH<Value> StartValue;
- /// Induction kind.
- InductionKind IK = IK_NoInduction;
- /// Step value.
- const SCEV *Step = nullptr;
- // Instruction that advances induction variable.
- BinaryOperator *InductionBinOp = nullptr;
- // Instructions used for type-casts of the induction variable,
- // that are redundant when guarded with a runtime SCEV overflow check.
- SmallVector<Instruction *, 2> RedundantCasts;
-};
-
BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
bool PreserveLCSSA);
@@ -420,7 +110,7 @@ bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
/// arguments. Diagnostics is emitted via \p ORE. It returns changed status.
bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
TargetLibraryInfo *, TargetTransformInfo *, Loop *,
- AliasSetTracker *, LoopSafetyInfo *,
+ AliasSetTracker *, MemorySSAUpdater *, ICFLoopSafetyInfo *,
OptimizationRemarkEmitter *ORE);
/// Walk the specified region of the CFG (defined by all blocks
@@ -433,7 +123,8 @@ bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
/// ORE. It returns changed status.
bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
TargetLibraryInfo *, Loop *, AliasSetTracker *,
- LoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
+ MemorySSAUpdater *, ICFLoopSafetyInfo *,
+ OptimizationRemarkEmitter *ORE);
/// This function deletes dead loops. The caller of this function needs to
/// guarantee that the loop is infact dead.
@@ -462,7 +153,8 @@ bool promoteLoopAccessesToScalars(const SmallSetVector<Value *, 8> &,
SmallVectorImpl<Instruction *> &,
PredIteratorCache &, LoopInfo *,
DominatorTree *, const TargetLibraryInfo *,
- Loop *, AliasSetTracker *, LoopSafetyInfo *,
+ Loop *, AliasSetTracker *,
+ ICFLoopSafetyInfo *,
OptimizationRemarkEmitter *);
/// Does a BFS from a given node to all of its children inside a given loop.
@@ -478,9 +170,80 @@ SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
/// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
/// operand or null otherwise. If the string metadata is not found return
/// Optional's not-a-value.
-Optional<const MDOperand *> findStringMetadataForLoop(Loop *TheLoop,
+Optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
StringRef Name);
+/// Find named metadata for a loop with an integer value.
+llvm::Optional<int> getOptionalIntLoopAttribute(Loop *TheLoop, StringRef Name);
+
+/// Create a new loop identifier for a loop created from a loop transformation.
+///
+/// @param OrigLoopID The loop ID of the loop before the transformation.
+/// @param FollowupAttrs List of attribute names that contain attributes to be
+/// added to the new loop ID.
+/// @param InheritOptionsAttrsPrefix Selects which attributes should be inherited
+/// from the original loop. The following values
+/// are considered:
+/// nullptr : Inherit all attributes from @p OrigLoopID.
+/// "" : Do not inherit any attribute from @p OrigLoopID; only use
+/// those specified by a followup attribute.
+/// "<prefix>": Inherit all attributes except those which start with
+/// <prefix>; commonly used to remove metadata for the
+/// applied transformation.
+/// @param AlwaysNew If true, do not try to reuse OrigLoopID and never return
+/// None.
+///
+/// @return The loop ID for the after-transformation loop. The following values
+/// can be returned:
+/// None : No followup attribute was found; it is up to the
+/// transformation to choose attributes that make sense.
+/// @p OrigLoopID: The original identifier can be reused.
+/// nullptr : The new loop has no attributes.
+/// MDNode* : A new unique loop identifier.
+Optional<MDNode *>
+makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
+ const char *InheritOptionsAttrsPrefix = "",
+ bool AlwaysNew = false);
+
+/// Look for the loop attribute that disables all transformation heuristic.
+bool hasDisableAllTransformsHint(const Loop *L);
+
+/// The mode sets how eager a transformation should be applied.
+enum TransformationMode {
+ /// The pass can use heuristics to determine whether a transformation should
+ /// be applied.
+ TM_Unspecified,
+
+ /// The transformation should be applied without considering a cost model.
+ TM_Enable,
+
+ /// The transformation should not be applied.
+ TM_Disable,
+
+ /// Force is a flag and should not be used alone.
+ TM_Force = 0x04,
+
+ /// The transformation was directed by the user, e.g. by a #pragma in
+ /// the source code. If the transformation could not be applied, a
+ /// warning should be emitted.
+ TM_ForcedByUser = TM_Enable | TM_Force,
+
+ /// The transformation must not be applied. For instance, `#pragma clang loop
+ /// unroll(disable)` explicitly forbids any unrolling to take place. Unlike
+ /// general loop metadata, it must not be dropped. Most passes should not
+ /// behave differently under TM_Disable and TM_SuppressedByUser.
+ TM_SuppressedByUser = TM_Disable | TM_Force
+};
+
+/// @{
+/// Get the mode for LLVM's supported loop transformations.
+TransformationMode hasUnrollTransformation(Loop *L);
+TransformationMode hasUnrollAndJamTransformation(Loop *L);
+TransformationMode hasVectorizeTransformation(Loop *L);
+TransformationMode hasDistributeTransformation(Loop *L);
+TransformationMode hasLICMVersioningTransformation(Loop *L);
+/// @}
+
/// Set input string into loop metadata by keeping other values intact.
void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
unsigned V = 0);
@@ -490,6 +253,11 @@ void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
/// estimate can not be made.
Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
+/// Check inner loop (L) backedge count is known to be invariant on all
+/// iterations of its outer loop. If the loop has no parent, this is trivially
+/// true.
+bool hasIterationCountInvariantInParent(Loop *L, ScalarEvolution &SE);
+
/// Helper to consistently add the set of standard passes to a loop pass's \c
/// AnalysisUsage.
///
@@ -497,18 +265,25 @@ Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
/// getAnalysisUsage.
void getLoopAnalysisUsage(AnalysisUsage &AU);
-/// Returns true if the hoister and sinker can handle this instruction.
-/// If SafetyInfo is null, we are checking for sinking instructions from
-/// preheader to loop body (no speculation).
-/// If SafetyInfo is not null, we are checking for hoisting/sinking
-/// instructions from loop body to preheader/exit. Check if the instruction
-/// can execute speculatively.
+/// Returns true if is legal to hoist or sink this instruction disregarding the
+/// possible introduction of faults. Reasoning about potential faulting
+/// instructions is the responsibility of the caller since it is challenging to
+/// do efficiently from within this routine.
+/// \p TargetExecutesOncePerLoop is true only when it is guaranteed that the
+/// target executes at most once per execution of the loop body. This is used
+/// to assess the legality of duplicating atomic loads. Generally, this is
+/// true when moving out of loop and not true when moving into loops.
/// If \p ORE is set use it to emit optimization remarks.
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
Loop *CurLoop, AliasSetTracker *CurAST,
- LoopSafetyInfo *SafetyInfo,
+ MemorySSAUpdater *MSSAU, bool TargetExecutesOncePerLoop,
OptimizationRemarkEmitter *ORE = nullptr);
+/// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+Value *createMinMaxOp(IRBuilder<> &Builder,
+ RecurrenceDescriptor::MinMaxRecurrenceKind RK,
+ Value *Left, Value *Right);
+
/// Generates an ordered vector reduction using extracts to reduce the value.
Value *
getOrderedReduction(IRBuilder<> &Builder, Value *Acc, Value *Src, unsigned Op,
@@ -527,12 +302,12 @@ Value *getShuffleReduction(IRBuilder<> &Builder, Value *Src, unsigned Op,
/// additional information supplied in \p Flags.
/// The target is queried to determine if intrinsics or shuffle sequences are
/// required to implement the reduction.
-Value *
-createSimpleTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
- unsigned Opcode, Value *Src,
- TargetTransformInfo::ReductionFlags Flags =
- TargetTransformInfo::ReductionFlags(),
- ArrayRef<Value *> RedOps = None);
+Value *createSimpleTargetReduction(IRBuilder<> &B,
+ const TargetTransformInfo *TTI,
+ unsigned Opcode, Value *Src,
+ TargetTransformInfo::ReductionFlags Flags =
+ TargetTransformInfo::ReductionFlags(),
+ ArrayRef<Value *> RedOps = None);
/// Create a generic target reduction using a recurrence descriptor \p Desc
/// The target is queried to determine if intrinsics or shuffle sequences are
@@ -548,6 +323,23 @@ Value *createTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
/// Flag set: NSW, NUW, exact, and all of fast-math.
void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
+/// Returns true if we can prove that \p S is defined and always negative in
+/// loop \p L.
+bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);
+
+/// Returns true if we can prove that \p S is defined and always non-negative in
+/// loop \p L.
+bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
+ ScalarEvolution &SE);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned max.
+bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+ bool Signed);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned min.
+bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+ bool Signed);
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
index 14615c25d093..fee492be2a90 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -58,6 +58,24 @@ std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
StringRef VersionCheckName = StringRef());
+/// Creates sanitizer constructor function lazily. If a constructor and init
+/// function already exist, this function returns it. Otherwise it calls \c
+/// createSanitizerCtorAndInitFunctions. The FunctionsCreatedCallback is invoked
+/// in that case, passing the new Ctor and Init function.
+///
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
+std::pair<Function *, Function *> getOrCreateSanitizerCtorAndInitFunctions(
+ Module &M, StringRef CtorName, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+ function_ref<void(Function *, Function *)> FunctionsCreatedCallback,
+ StringRef VersionCheckName = StringRef());
+
+// Creates and returns a sanitizer init function without argument if it doesn't
+// exist, and adds it to the global constructors list. Otherwise it returns the
+// existing function.
+Function *getOrCreateInitFunction(Module &M, StringRef Name);
+
/// Rename all the anon globals in the module using a hash computed from
/// the list of public globals in the module.
bool nameUnamedGlobals(Module &M);
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/PredicateInfo.h b/contrib/llvm/include/llvm/Transforms/Utils/PredicateInfo.h
index b53eda7e5a42..2fc38089f3f1 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/PredicateInfo.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -60,6 +60,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/OrderedInstructions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
@@ -76,7 +77,6 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Transforms/Utils/OrderedInstructions.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index d007f909c6a4..025bcd44e310 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -77,21 +77,34 @@ private:
OptimizationRemarkEmitter &ORE;
bool UnsafeFPShrink;
function_ref<void(Instruction *, Value *)> Replacer;
+ function_ref<void(Instruction *)> Eraser;
/// Internal wrapper for RAUW that is the default implementation.
///
/// Other users may provide an alternate function with this signature instead
/// of this one.
- static void replaceAllUsesWithDefault(Instruction *I, Value *With);
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With) {
+ I->replaceAllUsesWith(With);
+ }
+
+ /// Internal wrapper for eraseFromParent that is the default implementation.
+ static void eraseFromParentDefault(Instruction *I) { I->eraseFromParent(); }
/// Replace an instruction's uses with a value using our replacer.
void replaceAllUsesWith(Instruction *I, Value *With);
+ /// Erase an instruction from its parent with our eraser.
+ void eraseFromParent(Instruction *I);
+
+ Value *foldMallocMemset(CallInst *Memset, IRBuilder<> &B);
+
public:
- LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
- OptimizationRemarkEmitter &ORE,
- function_ref<void(Instruction *, Value *)> Replacer =
- &replaceAllUsesWithDefault);
+ LibCallSimplifier(
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
+ OptimizationRemarkEmitter &ORE,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault,
+ function_ref<void(Instruction *)> Eraser = &eraseFromParentDefault);
/// optimizeCall - Take the given call instruction and return a more
/// optimal value to replace the instruction with or 0 if a more
@@ -131,8 +144,8 @@ private:
// Math Library Optimizations
Value *optimizeCAbs(CallInst *CI, IRBuilder<> &B);
- Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
Value *optimizePow(CallInst *CI, IRBuilder<> &B);
+ Value *replacePowWithExp(CallInst *Pow, IRBuilder<> &B);
Value *replacePowWithSqrt(CallInst *Pow, IRBuilder<> &B);
Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
index a6b84af068a5..70e936d75008 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -35,6 +35,15 @@ class ScalarEvolution;
using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
+/// @{
+/// Metadata attribute names
+const char *const LLVMLoopUnrollFollowupAll = "llvm.loop.unroll.followup_all";
+const char *const LLVMLoopUnrollFollowupUnrolled =
+ "llvm.loop.unroll.followup_unrolled";
+const char *const LLVMLoopUnrollFollowupRemainder =
+ "llvm.loop.unroll.followup_remainder";
+/// @}
+
const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
BasicBlock *ClonedBB, LoopInfo *LI,
NewLoopsMap &NewLoops);
@@ -61,15 +70,16 @@ LoopUnrollResult UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
unsigned PeelCount, bool UnrollRemainder,
LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
- OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
+ OptimizationRemarkEmitter *ORE, bool PreserveLCSSA,
+ Loop **RemainderLoop = nullptr);
bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
bool AllowExpensiveTripCount,
bool UseEpilogRemainder, bool UnrollRemainder,
- LoopInfo *LI,
- ScalarEvolution *SE, DominatorTree *DT,
- AssumptionCache *AC,
- bool PreserveLCSSA);
+ LoopInfo *LI, ScalarEvolution *SE,
+ DominatorTree *DT, AssumptionCache *AC,
+ bool PreserveLCSSA,
+ Loop **ResultLoop = nullptr);
void computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::UnrollingPreferences &UP,
@@ -84,7 +94,8 @@ LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
unsigned TripMultiple, bool UnrollRemainder,
LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
- OptimizationRemarkEmitter *ORE);
+ OptimizationRemarkEmitter *ORE,
+ Loop **EpilogueLoop = nullptr);
bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
DependenceInfo &DI);
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize.h
index 950af7ffe05f..70f9a2e0741b 100644
--- a/contrib/llvm/include/llvm/Transforms/Vectorize.h
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize.h
@@ -110,8 +110,8 @@ struct VectorizeConfig {
//
// LoopVectorize - Create a loop vectorization pass.
//
-Pass *createLoopVectorizePass(bool NoUnrolling = false,
- bool AlwaysVectorize = true);
+Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced = false,
+ bool VectorizeOnlyWhenForced = false);
//===----------------------------------------------------------------------===//
//
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h b/contrib/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h
new file mode 100644
index 000000000000..6b37d7093c44
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h
@@ -0,0 +1,27 @@
+//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H
+#define LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LoadStoreVectorizerPass : public PassInfoMixin<LoadStoreVectorizerPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Create a legacy pass manager instance of the LoadStoreVectorizer pass
+Pass *createLoadStoreVectorizerPass();
+
+}
+
+#endif /* LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H */
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index 224879cdba52..5c7bba048607 100644
--- a/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -95,7 +95,7 @@ public:
FK_Enabled = 1, ///< Forcing enabled.
};
- LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
+ LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
OptimizationRemarkEmitter &ORE);
/// Mark the loop L as already vectorized by setting the width to 1.
@@ -105,7 +105,8 @@ public:
writeHintsToMetadata(Hints);
}
- bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const;
+ bool allowVectorization(Function *F, Loop *L,
+ bool VectorizeOnlyWhenForced) const;
/// Dumps all the hint information.
void emitRemarkWithHints() const;
@@ -113,7 +114,12 @@ public:
unsigned getWidth() const { return Width.Value; }
unsigned getInterleave() const { return Interleave.Value; }
unsigned getIsVectorized() const { return IsVectorized.Value; }
- enum ForceKind getForce() const { return (ForceKind)Force.Value; }
+ enum ForceKind getForce() const {
+ if ((ForceKind)Force.Value == FK_Undefined &&
+ hasDisableAllTransformsHint(TheLoop))
+ return FK_Disabled;
+ return (ForceKind)Force.Value;
+ }
/// If hints are provided that force vectorization, use the AlwaysPrint
/// pass name to force the frontend to print the diagnostic.
@@ -241,6 +247,10 @@ public:
/// If false, good old LV code.
bool canVectorize(bool UseVPlanNativePath);
+ /// Return true if we can vectorize this loop while folding its tail by
+ /// masking.
+ bool canFoldTailByMasking();
+
/// Returns the primary induction variable.
PHINode *getPrimaryInduction() { return PrimaryInduction; }
@@ -332,6 +342,11 @@ private:
/// If false, good old LV code.
bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);
+ /// Set up outer loop inductions by checking Phis in outer loop header for
+ /// supported inductions (int inductions). Return false if any of these Phis
+ /// is not a supported induction or if we fail to find an induction.
+ bool setupOuterLoopInductions();
+
/// Return true if the pre-header, exiting and latch blocks of \p Lp
/// (non-recursive) are considered legal for vectorization.
/// Temporarily taking UseVPlanNativePath parameter. If true, take
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
index d79d84691803..d9c4f7b023c1 100644
--- a/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -78,12 +78,13 @@ class TargetTransformInfo;
/// The LoopVectorize Pass.
struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
- bool DisableUnrolling = false;
+ /// If false, consider all loops for interleaving.
+ /// If true, only loops that explicitly request interleaving are considered.
+ bool InterleaveOnlyWhenForced = false;
- /// If true, consider all loops for vectorization.
- /// If false, only loops that explicitly request vectorization are
- /// considered.
- bool AlwaysVectorize = true;
+ /// If false, consider all loops for vectorization.
+ /// If true, only loops that explicitly request vectorization are considered.
+ bool VectorizeOnlyWhenForced = false;
ScalarEvolution *SE;
LoopInfo *LI;
diff --git a/contrib/llvm/include/llvm/XRay/BlockIndexer.h b/contrib/llvm/include/llvm/XRay/BlockIndexer.h
new file mode 100644
index 000000000000..b42fa17f3fb7
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/BlockIndexer.h
@@ -0,0 +1,69 @@
+//===- BlockIndexer.h - FDR Block Indexing Visitor ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the RecordVisitor which generates a mapping between a
+// thread and a range of records representing a block.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_XRAY_BLOCKINDEXER_H_
+#define LLVM_LIB_XRAY_BLOCKINDEXER_H_
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/XRay/FDRRecords.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace xray {
+
+// The BlockIndexer will gather all related records associated with a
+// process+thread and group them by 'Block'.
+class BlockIndexer : public RecordVisitor {
+public:
+ struct Block {
+ uint64_t ProcessID;
+ int32_t ThreadID;
+ WallclockRecord *WallclockTime;
+ std::vector<Record *> Records;
+ };
+
+ // This maps the process + thread combination to a sequence of blocks.
+ using Index = DenseMap<std::pair<uint64_t, int32_t>, std::vector<Block>>;
+
+private:
+ Index &Indices;
+
+ Block CurrentBlock{0, 0, nullptr, {}};
+
+public:
+ explicit BlockIndexer(Index &I) : RecordVisitor(), Indices(I) {}
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+
+ /// The flush() function will clear out the current state of the visitor, to
+ /// allow for explicitly flushing a block's records to the currently
+ /// recognized thread and process combination.
+ Error flush();
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_LIB_XRAY_BLOCKINDEXER_H_
diff --git a/contrib/llvm/include/llvm/XRay/BlockPrinter.h b/contrib/llvm/include/llvm/XRay/BlockPrinter.h
new file mode 100644
index 000000000000..bfb21e239517
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/BlockPrinter.h
@@ -0,0 +1,62 @@
+//===- BlockPrinter.h - FDR Block Pretty Printer -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the RecordVisitor which formats a block of records for
+// easier human consumption.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_BLOCKPRINTER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_BLOCKPRINTER_H_
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/XRay/FDRRecords.h"
+#include "llvm/XRay/RecordPrinter.h"
+
+namespace llvm {
+namespace xray {
+
+class BlockPrinter : public RecordVisitor {
+ enum class State {
+ Start,
+ Preamble,
+ Metadata,
+ Function,
+ Arg,
+ CustomEvent,
+ End,
+ };
+
+ raw_ostream &OS;
+ RecordPrinter &RP;
+ State CurrentState = State::Start;
+
+public:
+ explicit BlockPrinter(raw_ostream &O, RecordPrinter &P)
+ : RecordVisitor(), OS(O), RP(P) {}
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+
+ void reset() { CurrentState = State::Start; }
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_BLOCKPRINTER_H_
diff --git a/contrib/llvm/include/llvm/XRay/BlockVerifier.h b/contrib/llvm/include/llvm/XRay/BlockVerifier.h
new file mode 100644
index 000000000000..46371c13891a
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/BlockVerifier.h
@@ -0,0 +1,72 @@
+//===- BlockVerifier.h - FDR Block Verifier -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the RecordVisitor which verifies a sequence of records
+// associated with a block, following the FDR mode log format's specifications.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_BLOCKVERIFIER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_BLOCKVERIFIER_H_
+
+#include "llvm/XRay/FDRRecords.h"
+#include <array>
+#include <bitset>
+
+namespace llvm {
+namespace xray {
+
+class BlockVerifier : public RecordVisitor {
+public:
+ // We force State elements to be size_t, to be used as indices for containers.
+ enum class State : std::size_t {
+ Unknown,
+ BufferExtents,
+ NewBuffer,
+ WallClockTime,
+ PIDEntry,
+ NewCPUId,
+ TSCWrap,
+ CustomEvent,
+ TypedEvent,
+ Function,
+ CallArg,
+ EndOfBuffer,
+ StateMax,
+ };
+
+private:
+ // We keep track of the current record seen by the verifier.
+ State CurrentRecord = State::Unknown;
+
+ // Transitions the current record to the new record, records an error on
+ // invalid transitions.
+ Error transition(State To);
+
+public:
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+
+ Error verify();
+ void reset();
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_BLOCKVERIFIER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRLogBuilder.h b/contrib/llvm/include/llvm/XRay/FDRLogBuilder.h
new file mode 100644
index 000000000000..b5e9ed5c406b
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRLogBuilder.h
@@ -0,0 +1,41 @@
+//===- FDRLogBuilder.h - XRay FDR Log Building Utility --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_FDRLOGBUILDER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_FDRLOGBUILDER_H_
+
+#include "llvm/XRay/FDRRecords.h"
+
+namespace llvm {
+namespace xray {
+
+/// The LogBuilder class allows for creating ad-hoc collections of records
+/// through the `add<...>(...)` function. An example use of this API is in
+/// crafting arbitrary sequences of records:
+///
+/// auto Records = LogBuilder()
+/// .add<BufferExtents>(256)
+/// .add<NewBufferRecord>(1)
+/// .consume();
+///
+class LogBuilder {
+ std::vector<std::unique_ptr<Record>> Records;
+
+public:
+ template <class R, class... T> LogBuilder &add(T &&... A) {
+ Records.emplace_back(new R(std::forward<T>(A)...));
+ return *this;
+ }
+
+ std::vector<std::unique_ptr<Record>> consume() { return std::move(Records); }
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_FDRLOGBUILDER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRRecordConsumer.h b/contrib/llvm/include/llvm/XRay/FDRRecordConsumer.h
new file mode 100644
index 000000000000..e856e1540558
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRRecordConsumer.h
@@ -0,0 +1,55 @@
+//===- FDRRecordConsumer.h - XRay Flight Data Recorder Mode Records -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_FDRRECORDCONSUMER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_FDRRECORDCONSUMER_H_
+
+#include "llvm/Support/Error.h"
+#include "llvm/XRay/FDRRecords.h"
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace xray {
+
+class RecordConsumer {
+public:
+ virtual Error consume(std::unique_ptr<Record> R) = 0;
+ virtual ~RecordConsumer() = default;
+};
+
+// This consumer will collect all the records into a vector of records, in
+// arrival order.
+class LogBuilderConsumer : public RecordConsumer {
+ std::vector<std::unique_ptr<Record>> &Records;
+
+public:
+ explicit LogBuilderConsumer(std::vector<std::unique_ptr<Record>> &R)
+ : RecordConsumer(), Records(R) {}
+
+ Error consume(std::unique_ptr<Record> R) override;
+};
+
+// A PipelineConsumer applies a set of visitors to every consumed Record, in the
+// order by which the visitors are added to the pipeline in the order of
+// appearance.
+class PipelineConsumer : public RecordConsumer {
+ std::vector<RecordVisitor *> Visitors;
+
+public:
+ PipelineConsumer(std::initializer_list<RecordVisitor *> V)
+ : RecordConsumer(), Visitors(V) {}
+
+ Error consume(std::unique_ptr<Record> R) override;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_FDRRECORDCONSUMER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRRecordProducer.h b/contrib/llvm/include/llvm/XRay/FDRRecordProducer.h
new file mode 100644
index 000000000000..efdba2a67b7b
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRRecordProducer.h
@@ -0,0 +1,51 @@
+//===- FDRRecordProducer.h - XRay FDR Mode Record Producer ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_FDRRECORDPRODUCER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_FDRRECORDPRODUCER_H_
+
+#include "llvm/Support/Error.h"
+#include "llvm/XRay/FDRRecords.h"
+#include "llvm/XRay/XRayRecord.h"
+#include <memory>
+
+namespace llvm {
+namespace xray {
+
+class RecordProducer {
+public:
+ /// All producer implementations must yield either an Error or a non-nullptr
+ /// unique_ptr<Record>.
+ virtual Expected<std::unique_ptr<Record>> produce() = 0;
+ virtual ~RecordProducer() = default;
+};
+
+class FileBasedRecordProducer : public RecordProducer {
+ const XRayFileHeader &Header;
+ DataExtractor &E;
+ uint32_t &OffsetPtr;
+ uint32_t CurrentBufferBytes = 0;
+
+ // Helper function which gets the next record by speculatively reading through
+ // the log, finding a buffer extents record.
+ Expected<std::unique_ptr<Record>> findNextBufferExtent();
+
+public:
+ FileBasedRecordProducer(const XRayFileHeader &FH, DataExtractor &DE,
+ uint32_t &OP)
+ : Header(FH), E(DE), OffsetPtr(OP) {}
+
+ /// This producer encapsulates the logic for loading a File-backed
+ /// RecordProducer hidden behind a DataExtractor.
+ Expected<std::unique_ptr<Record>> produce() override;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_FDRRECORDPRODUCER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRRecords.h b/contrib/llvm/include/llvm/XRay/FDRRecords.h
new file mode 100644
index 000000000000..8a84f4d0c1fb
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRRecords.h
@@ -0,0 +1,450 @@
+//===- FDRRecords.h - XRay Flight Data Recorder Mode Records --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define types and operations on these types that represent the different kinds
+// of records we encounter in XRay flight data recorder mode traces.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_XRAY_FDRRECORDS_H_
+#define LLVM_LIB_XRAY_FDRRECORDS_H_
+
+#include <cstdint>
+#include <string>
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/XRay/XRayRecord.h"
+
+namespace llvm {
+namespace xray {
+
+class RecordVisitor;
+class RecordInitializer;
+
+class Record {
+public:
+ enum class RecordKind {
+ RK_Metadata,
+ RK_Metadata_BufferExtents,
+ RK_Metadata_WallClockTime,
+ RK_Metadata_NewCPUId,
+ RK_Metadata_TSCWrap,
+ RK_Metadata_CustomEvent,
+ RK_Metadata_CustomEventV5,
+ RK_Metadata_CallArg,
+ RK_Metadata_PIDEntry,
+ RK_Metadata_NewBuffer,
+ RK_Metadata_EndOfBuffer,
+ RK_Metadata_TypedEvent,
+ RK_Metadata_LastMetadata,
+ RK_Function,
+ };
+
+ static StringRef kindToString(RecordKind K);
+
+private:
+ const RecordKind T;
+
+public:
+ Record(const Record &) = delete;
+ Record(Record &&) = delete;
+ Record &operator=(const Record &) = delete;
+ Record &operator=(Record &&) = delete;
+ explicit Record(RecordKind T) : T(T) {}
+
+ RecordKind getRecordType() const { return T; }
+
+ // Each Record should be able to apply an abstract visitor, and choose the
+ // appropriate function in the visitor to invoke, given its own type.
+ virtual Error apply(RecordVisitor &V) = 0;
+
+ virtual ~Record() = default;
+};
+
+class MetadataRecord : public Record {
+public:
+ enum class MetadataType : unsigned {
+ Unknown,
+ BufferExtents,
+ WallClockTime,
+ NewCPUId,
+ TSCWrap,
+ CustomEvent,
+ CallArg,
+ PIDEntry,
+ NewBuffer,
+ EndOfBuffer,
+ TypedEvent,
+ };
+
+protected:
+ static constexpr int kMetadataBodySize = 15;
+ friend class RecordInitializer;
+
+private:
+ const MetadataType MT;
+
+public:
+ explicit MetadataRecord(RecordKind T, MetadataType M) : Record(T), MT(M) {}
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() >= RecordKind::RK_Metadata &&
+ R->getRecordType() <= RecordKind::RK_Metadata_LastMetadata;
+ }
+
+ MetadataType metadataType() const { return MT; }
+
+ virtual ~MetadataRecord() = default;
+};
+
+// What follows are specific Metadata record types which encapsulate the
+// information associated with specific metadata record types in an FDR mode
+// log.
+class BufferExtents : public MetadataRecord {
+ uint64_t Size = 0;
+ friend class RecordInitializer;
+
+public:
+ BufferExtents()
+ : MetadataRecord(RecordKind::RK_Metadata_BufferExtents,
+ MetadataType::BufferExtents) {}
+
+ explicit BufferExtents(uint64_t S)
+ : MetadataRecord(RecordKind::RK_Metadata_BufferExtents,
+ MetadataType::BufferExtents),
+ Size(S) {}
+
+ uint64_t size() const { return Size; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_BufferExtents;
+ }
+};
+
+class WallclockRecord : public MetadataRecord {
+ uint64_t Seconds = 0;
+ uint32_t Nanos = 0;
+ friend class RecordInitializer;
+
+public:
+ WallclockRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_WallClockTime,
+ MetadataType::WallClockTime) {}
+
+ explicit WallclockRecord(uint64_t S, uint32_t N)
+ : MetadataRecord(RecordKind::RK_Metadata_WallClockTime,
+ MetadataType::WallClockTime),
+ Seconds(S), Nanos(N) {}
+
+ uint64_t seconds() const { return Seconds; }
+ uint32_t nanos() const { return Nanos; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_WallClockTime;
+ }
+};
+
+class NewCPUIDRecord : public MetadataRecord {
+ uint16_t CPUId = 0;
+ uint64_t TSC = 0;
+ friend class RecordInitializer;
+
+public:
+ NewCPUIDRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_NewCPUId,
+ MetadataType::NewCPUId) {}
+
+ NewCPUIDRecord(uint16_t C, uint64_t T)
+ : MetadataRecord(RecordKind::RK_Metadata_NewCPUId,
+ MetadataType::NewCPUId),
+ CPUId(C), TSC(T) {}
+
+ uint16_t cpuid() const { return CPUId; }
+
+ uint64_t tsc() const { return TSC; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_NewCPUId;
+ }
+};
+
+class TSCWrapRecord : public MetadataRecord {
+ uint64_t BaseTSC = 0;
+ friend class RecordInitializer;
+
+public:
+ TSCWrapRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_TSCWrap, MetadataType::TSCWrap) {
+ }
+
+ explicit TSCWrapRecord(uint64_t B)
+ : MetadataRecord(RecordKind::RK_Metadata_TSCWrap, MetadataType::TSCWrap),
+ BaseTSC(B) {}
+
+ uint64_t tsc() const { return BaseTSC; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_TSCWrap;
+ }
+};
+
+class CustomEventRecord : public MetadataRecord {
+ int32_t Size = 0;
+ uint64_t TSC = 0;
+ uint16_t CPU = 0;
+ std::string Data{};
+ friend class RecordInitializer;
+
+public:
+ CustomEventRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_CustomEvent,
+ MetadataType::CustomEvent) {}
+
+ explicit CustomEventRecord(uint64_t S, uint64_t T, uint16_t C, std::string D)
+ : MetadataRecord(RecordKind::RK_Metadata_CustomEvent,
+ MetadataType::CustomEvent),
+ Size(S), TSC(T), CPU(C), Data(std::move(D)) {}
+
+ int32_t size() const { return Size; }
+ uint64_t tsc() const { return TSC; }
+ uint16_t cpu() const { return CPU; }
+ StringRef data() const { return Data; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_CustomEvent;
+ }
+};
+
+class CustomEventRecordV5 : public MetadataRecord {
+ int32_t Size = 0;
+ int32_t Delta = 0;
+ std::string Data{};
+ friend class RecordInitializer;
+
+public:
+ CustomEventRecordV5()
+ : MetadataRecord(RecordKind::RK_Metadata_CustomEventV5,
+ MetadataType::CustomEvent) {}
+
+ explicit CustomEventRecordV5(int32_t S, int32_t D, std::string P)
+ : MetadataRecord(RecordKind::RK_Metadata_CustomEventV5,
+ MetadataType::CustomEvent),
+ Size(S), Delta(D), Data(std::move(P)) {}
+
+ int32_t size() const { return Size; }
+ int32_t delta() const { return Delta; }
+ StringRef data() const { return Data; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_CustomEventV5;
+ }
+};
+
+class TypedEventRecord : public MetadataRecord {
+ int32_t Size = 0;
+ int32_t Delta = 0;
+ uint16_t EventType = 0;
+ std::string Data{};
+ friend class RecordInitializer;
+
+public:
+ TypedEventRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_TypedEvent,
+ MetadataType::TypedEvent) {}
+
+ explicit TypedEventRecord(int32_t S, int32_t D, uint16_t E, std::string P)
+ : MetadataRecord(RecordKind::RK_Metadata_TypedEvent,
+ MetadataType::TypedEvent),
+ Size(S), Delta(D), Data(std::move(P)) {}
+
+ int32_t size() const { return Size; }
+ int32_t delta() const { return Delta; }
+ uint16_t eventType() const { return EventType; }
+ StringRef data() const { return Data; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_TypedEvent;
+ }
+};
+
+class CallArgRecord : public MetadataRecord {
+ uint64_t Arg;
+ friend class RecordInitializer;
+
+public:
+ CallArgRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_CallArg, MetadataType::CallArg) {
+ }
+
+ explicit CallArgRecord(uint64_t A)
+ : MetadataRecord(RecordKind::RK_Metadata_CallArg, MetadataType::CallArg),
+ Arg(A) {}
+
+ uint64_t arg() const { return Arg; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_CallArg;
+ }
+};
+
+class PIDRecord : public MetadataRecord {
+ int32_t PID = 0;
+ friend class RecordInitializer;
+
+public:
+ PIDRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_PIDEntry,
+ MetadataType::PIDEntry) {}
+
+ explicit PIDRecord(int32_t P)
+ : MetadataRecord(RecordKind::RK_Metadata_PIDEntry,
+ MetadataType::PIDEntry),
+ PID(P) {}
+
+ int32_t pid() const { return PID; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_PIDEntry;
+ }
+};
+
+class NewBufferRecord : public MetadataRecord {
+ int32_t TID = 0;
+ friend class RecordInitializer;
+
+public:
+ NewBufferRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_NewBuffer,
+ MetadataType::NewBuffer) {}
+
+ explicit NewBufferRecord(int32_t T)
+ : MetadataRecord(RecordKind::RK_Metadata_NewBuffer,
+ MetadataType::NewBuffer),
+ TID(T) {}
+
+ int32_t tid() const { return TID; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_NewBuffer;
+ }
+};
+
+class EndBufferRecord : public MetadataRecord {
+public:
+ EndBufferRecord()
+ : MetadataRecord(RecordKind::RK_Metadata_EndOfBuffer,
+ MetadataType::EndOfBuffer) {}
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Metadata_EndOfBuffer;
+ }
+};
+
+class FunctionRecord : public Record {
+ RecordTypes Kind;
+ int32_t FuncId;
+ uint32_t Delta;
+ friend class RecordInitializer;
+
+ static constexpr unsigned kFunctionRecordSize = 8;
+
+public:
+ FunctionRecord() : Record(RecordKind::RK_Function) {}
+
+ explicit FunctionRecord(RecordTypes K, int32_t F, uint32_t D)
+ : Record(RecordKind::RK_Function), Kind(K), FuncId(F), Delta(D) {}
+
+ // A function record is a concrete record type which has a number of common
+ // properties.
+ RecordTypes recordType() const { return Kind; }
+ int32_t functionId() const { return FuncId; }
+ uint32_t delta() const { return Delta; }
+
+ Error apply(RecordVisitor &V) override;
+
+ static bool classof(const Record *R) {
+ return R->getRecordType() == RecordKind::RK_Function;
+ }
+};
+
+class RecordVisitor {
+public:
+ virtual ~RecordVisitor() = default;
+
+ // Support all specific kinds of records:
+ virtual Error visit(BufferExtents &) = 0;
+ virtual Error visit(WallclockRecord &) = 0;
+ virtual Error visit(NewCPUIDRecord &) = 0;
+ virtual Error visit(TSCWrapRecord &) = 0;
+ virtual Error visit(CustomEventRecord &) = 0;
+ virtual Error visit(CallArgRecord &) = 0;
+ virtual Error visit(PIDRecord &) = 0;
+ virtual Error visit(NewBufferRecord &) = 0;
+ virtual Error visit(EndBufferRecord &) = 0;
+ virtual Error visit(FunctionRecord &) = 0;
+ virtual Error visit(CustomEventRecordV5 &) = 0;
+ virtual Error visit(TypedEventRecord &) = 0;
+};
+
+class RecordInitializer : public RecordVisitor {
+ DataExtractor &E;
+ uint32_t &OffsetPtr;
+ uint16_t Version;
+
+public:
+ static constexpr uint16_t DefaultVersion = 5u;
+
+ explicit RecordInitializer(DataExtractor &DE, uint32_t &OP, uint16_t V)
+ : RecordVisitor(), E(DE), OffsetPtr(OP), Version(V) {}
+
+ explicit RecordInitializer(DataExtractor &DE, uint32_t &OP)
+ : RecordInitializer(DE, OP, DefaultVersion) {}
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_LIB_XRAY_FDRRECORDS_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRTraceExpander.h b/contrib/llvm/include/llvm/XRay/FDRTraceExpander.h
new file mode 100644
index 000000000000..02a21bed5ce9
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRTraceExpander.h
@@ -0,0 +1,63 @@
+//===- FDRTraceExpander.h - XRay FDR Mode Log Expander --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// We define an FDR record visitor which can reconstitute XRayRecord instances
+// from a sequence of FDR mode records in arrival order into a collection.
+//
+//===----------------------------------------------------------------------===//
+#ifndef INCLUDE_LLVM_XRAY_FDRTRACEEXPANDER_H_
+#define INCLUDE_LLVM_XRAY_FDRTRACEEXPANDER_H_
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/XRay/FDRRecords.h"
+#include "llvm/XRay/XRayRecord.h"
+
+namespace llvm {
+namespace xray {
+
+class TraceExpander : public RecordVisitor {
+ // Type-erased callback for handling individual XRayRecord instances.
+ function_ref<void(const XRayRecord &)> C;
+ int32_t PID = 0;
+ int32_t TID = 0;
+ uint64_t BaseTSC = 0;
+ XRayRecord CurrentRecord{0, 0, RecordTypes::ENTER, 0, 0, 0, 0, {}, {}};
+ uint16_t CPUId = 0;
+ uint16_t LogVersion = 0;
+ bool BuildingRecord = false;
+ bool IgnoringRecords = false;
+
+ void resetCurrentRecord();
+
+public:
+ explicit TraceExpander(function_ref<void(const XRayRecord &)> F, uint16_t L)
+ : RecordVisitor(), C(std::move(F)), LogVersion(L) {}
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+
+ // Must be called after all the records have been processed, to handle the
+ // most recent record generated.
+ Error flush();
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // INCLUDE_LLVM_XRAY_FDRTRACEEXPANDER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FDRTraceWriter.h b/contrib/llvm/include/llvm/XRay/FDRTraceWriter.h
new file mode 100644
index 000000000000..7b3b5fa25eff
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FDRTraceWriter.h
@@ -0,0 +1,56 @@
+//===- FDRTraceWriter.h - XRay FDR Trace Writer -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility that can write out XRay FDR Mode formatted trace files.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_FDRTRACEWRITER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_FDRTRACEWRITER_H_
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/XRay/FDRRecords.h"
+#include "llvm/XRay/XRayRecord.h"
+
+namespace llvm {
+namespace xray {
+
+/// The FDRTraceWriter allows us to hand-craft an XRay Flight Data Recorder
+/// (FDR) mode log file. This is used primarily for testing, generating
+/// sequences of FDR records that can be read/processed. It can also be used to
+/// generate various kinds of execution traces without using the XRay runtime.
+/// Note that this writer does not do any validation, but uses the types of
+/// records defined in the FDRRecords.h file.
+class FDRTraceWriter : public RecordVisitor {
+public:
+ // Construct an FDRTraceWriter associated with an output stream.
+ explicit FDRTraceWriter(raw_ostream &O, const XRayFileHeader &H);
+ ~FDRTraceWriter();
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+
+private:
+ support::endian::Writer OS;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_FDRTRACEWRITER_H_
diff --git a/contrib/llvm/include/llvm/XRay/FileHeaderReader.h b/contrib/llvm/include/llvm/XRay/FileHeaderReader.h
new file mode 100644
index 000000000000..3b8809bdbb34
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/FileHeaderReader.h
@@ -0,0 +1,33 @@
+//===- FileHeaderReader.h - XRay Trace File Header Reading Function -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares functions that can load an XRay log header from various
+// sources.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_XRAY_FILEHEADERREADER_H_
+#define LLVM_LIB_XRAY_FILEHEADERREADER_H_
+
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/XRay/XRayRecord.h"
+#include <cstdint>
+
+namespace llvm {
+namespace xray {
+
+/// Convenience function for loading the file header given a data extractor at a
+/// specified offset.
+Expected<XRayFileHeader> readBinaryFormatHeader(DataExtractor &HeaderExtractor,
+ uint32_t &OffsetPtr);
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_LIB_XRAY_FILEHEADERREADER_H_
diff --git a/contrib/llvm/include/llvm/XRay/Profile.h b/contrib/llvm/include/llvm/XRay/Profile.h
new file mode 100644
index 000000000000..9365630358e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/Profile.h
@@ -0,0 +1,150 @@
+//===- Profile.h - XRay Profile Abstraction -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the XRay Profile class representing the latency profile generated by
+// XRay's profiling mode.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_XRAY_PROFILE_H
+#define LLVM_XRAY_PROFILE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include <list>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+namespace xray {
+
+class Profile;
+
+// We forward declare the Trace type for turning a Trace into a Profile.
+class Trace;
+
+/// This function will attempt to load an XRay Profiling Mode profile from the
+/// provided |Filename|.
+///
+/// For any errors encountered in the loading of the profile data from
+/// |Filename|, this function will return an Error condition appropriately.
+Expected<Profile> loadProfile(StringRef Filename);
+
+/// This algorithm will merge two Profile instances into a single Profile
+/// instance, aggregating blocks by Thread ID.
+Profile mergeProfilesByThread(const Profile &L, const Profile &R);
+
+/// This algorithm will merge two Profile instances into a single Profile
+/// instance, aggregating blocks by function call stack.
+Profile mergeProfilesByStack(const Profile &L, const Profile &R);
+
+/// This function takes a Trace and creates a Profile instance from it.
+Expected<Profile> profileFromTrace(const Trace &T);
+
+/// Profile instances are thread-compatible.
+class Profile {
+public:
+ using ThreadID = uint64_t;
+ using PathID = unsigned;
+ using FuncID = int32_t;
+
+ struct Data {
+ uint64_t CallCount;
+ uint64_t CumulativeLocalTime;
+ };
+
+ struct Block {
+ ThreadID Thread;
+ std::vector<std::pair<PathID, Data>> PathData;
+ };
+
+ /// Provides a sequence of function IDs from a previously interned PathID.
+ ///
+  /// Returns an error if |P| has not previously been interned into the Profile.
+ ///
+ Expected<std::vector<FuncID>> expandPath(PathID P) const;
+
+ /// The stack represented in |P| must be in stack order (leaf to root). This
+ /// will always return the same PathID for |P| that has the same sequence.
+ PathID internPath(ArrayRef<FuncID> P);
+
+ /// Appends a fully-formed Block instance into the Profile.
+ ///
+ /// Returns an error condition in the following cases:
+ ///
+ /// - The PathData component of the Block is empty
+ ///
+ Error addBlock(Block &&B);
+
+ Profile() = default;
+ ~Profile() = default;
+
+ Profile(Profile &&O) noexcept
+ : Blocks(std::move(O.Blocks)), NodeStorage(std::move(O.NodeStorage)),
+ Roots(std::move(O.Roots)), PathIDMap(std::move(O.PathIDMap)),
+ NextID(O.NextID) {}
+
+ Profile &operator=(Profile &&O) noexcept {
+ Blocks = std::move(O.Blocks);
+ NodeStorage = std::move(O.NodeStorage);
+ Roots = std::move(O.Roots);
+ PathIDMap = std::move(O.PathIDMap);
+ NextID = O.NextID;
+ return *this;
+ }
+
+ Profile(const Profile &);
+ Profile &operator=(const Profile &);
+
+ friend void swap(Profile &L, Profile &R) {
+ using std::swap;
+ swap(L.Blocks, R.Blocks);
+ swap(L.NodeStorage, R.NodeStorage);
+ swap(L.Roots, R.Roots);
+ swap(L.PathIDMap, R.PathIDMap);
+ swap(L.NextID, R.NextID);
+ }
+
+private:
+ using BlockList = std::list<Block>;
+
+ struct TrieNode {
+ FuncID Func = 0;
+ std::vector<TrieNode *> Callees{};
+ TrieNode *Caller = nullptr;
+ PathID ID = 0;
+ };
+
+ // List of blocks associated with a Profile.
+ BlockList Blocks;
+
+ // List of TrieNode elements we've seen.
+ std::list<TrieNode> NodeStorage;
+
+ // List of call stack roots.
+ SmallVector<TrieNode *, 4> Roots;
+
+ // Reverse mapping between a PathID to a TrieNode*.
+ DenseMap<PathID, TrieNode *> PathIDMap;
+
+ // Used to identify paths.
+ PathID NextID = 1;
+
+public:
+ using const_iterator = BlockList::const_iterator;
+ const_iterator begin() const { return Blocks.begin(); }
+ const_iterator end() const { return Blocks.end(); }
+ bool empty() const { return Blocks.empty(); }
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/XRay/RecordPrinter.h b/contrib/llvm/include/llvm/XRay/RecordPrinter.h
new file mode 100644
index 000000000000..649c64ab6f5c
--- /dev/null
+++ b/contrib/llvm/include/llvm/XRay/RecordPrinter.h
@@ -0,0 +1,50 @@
+//===- RecordPrinter.h - FDR Record Printer -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the RecordVisitor which prints an individual record's
+// data in an ad hoc format, suitable for human inspection.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_INCLUDE_LLVM_XRAY_RECORDPRINTER_H_
+#define LLVM_INCLUDE_LLVM_XRAY_RECORDPRINTER_H_
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/XRay/FDRRecords.h"
+
+namespace llvm {
+namespace xray {
+
+class RecordPrinter : public RecordVisitor {
+ raw_ostream &OS;
+ std::string Delim;
+
+public:
+ explicit RecordPrinter(raw_ostream &O, std::string D)
+ : RecordVisitor(), OS(O), Delim(std::move(D)) {}
+
+ explicit RecordPrinter(raw_ostream &O) : RecordPrinter(O, ""){};
+
+ Error visit(BufferExtents &) override;
+ Error visit(WallclockRecord &) override;
+ Error visit(NewCPUIDRecord &) override;
+ Error visit(TSCWrapRecord &) override;
+ Error visit(CustomEventRecord &) override;
+ Error visit(CallArgRecord &) override;
+ Error visit(PIDRecord &) override;
+ Error visit(NewBufferRecord &) override;
+ Error visit(EndBufferRecord &) override;
+ Error visit(FunctionRecord &) override;
+ Error visit(CustomEventRecordV5 &) override;
+ Error visit(TypedEventRecord &) override;
+};
+
+} // namespace xray
+} // namespace llvm
+
+#endif // LLVM_INCLUDE_LLVM_XRAY_RECORDPRINTER_H_
diff --git a/contrib/llvm/include/llvm/XRay/Trace.h b/contrib/llvm/include/llvm/XRay/Trace.h
index 6b033d686b06..924addd1560d 100644
--- a/contrib/llvm/include/llvm/XRay/Trace.h
+++ b/contrib/llvm/include/llvm/XRay/Trace.h
@@ -17,8 +17,8 @@
#include <vector>
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/XRay/XRayRecord.h"
namespace llvm {
@@ -46,25 +46,35 @@ namespace xray {
///
class Trace {
XRayFileHeader FileHeader;
- std::vector<XRayRecord> Records;
+ using RecordVector = std::vector<XRayRecord>;
+ RecordVector Records;
typedef std::vector<XRayRecord>::const_iterator citerator;
- friend Expected<Trace> loadTraceFile(StringRef, bool);
+ friend Expected<Trace> loadTrace(const DataExtractor &, bool);
public:
+ using size_type = RecordVector::size_type;
+ using value_type = RecordVector::value_type;
+ using const_iterator = RecordVector::const_iterator;
+
/// Provides access to the loaded XRay trace file header.
const XRayFileHeader &getFileHeader() const { return FileHeader; }
- citerator begin() const { return Records.begin(); }
- citerator end() const { return Records.end(); }
- size_t size() const { return Records.size(); }
+ const_iterator begin() const { return Records.begin(); }
+ const_iterator end() const { return Records.end(); }
+ bool empty() const { return Records.empty(); }
+ size_type size() const { return Records.size(); }
};
/// This function will attempt to load XRay trace records from the provided
/// |Filename|.
Expected<Trace> loadTraceFile(StringRef Filename, bool Sort = false);
+/// This function will attempt to load XRay trace records from the provided
+/// DataExtractor.
+Expected<Trace> loadTrace(const DataExtractor &Extractor, bool Sort = false);
+
} // namespace xray
} // namespace llvm
diff --git a/contrib/llvm/include/llvm/XRay/XRayRecord.h b/contrib/llvm/include/llvm/XRay/XRayRecord.h
index 76873447f170..7685ec95838a 100644
--- a/contrib/llvm/include/llvm/XRay/XRayRecord.h
+++ b/contrib/llvm/include/llvm/XRay/XRayRecord.h
@@ -17,6 +17,7 @@
#include <cstdint>
#include <vector>
+#include <string>
namespace llvm {
namespace xray {
@@ -54,10 +55,23 @@ struct XRayFileHeader {
/// This may or may not correspond to actual record types in the raw trace (as
/// the loader implementation may synthesize this information in the process of
/// of loading).
-enum class RecordTypes { ENTER, EXIT, TAIL_EXIT, ENTER_ARG };
+enum class RecordTypes {
+ ENTER,
+ EXIT,
+ TAIL_EXIT,
+ ENTER_ARG,
+ CUSTOM_EVENT,
+ TYPED_EVENT
+};
+/// An XRayRecord is the denormalized view of data associated in a trace. These
+/// records may not correspond to actual entries in the raw traces, but they are
+/// the logical representation of records in a higher-level event log.
struct XRayRecord {
- /// The type of record.
+ /// RecordType values are used as "sub-types" which have meaning in the
+ /// context of the `Type` below. For function call and custom event records,
+ /// the RecordType is always 0, while for typed events we store the type in
+ /// the RecordType field.
uint16_t RecordType;
/// The CPU where the thread is running. We assume number of CPUs <= 65536.
@@ -66,7 +80,7 @@ struct XRayRecord {
/// Identifies the type of record.
RecordTypes Type;
- /// The function ID for the record.
+ /// The function ID for the record, if this is a function call record.
int32_t FuncId;
/// Get the full 8 bytes of the TSC when we get the log record.
@@ -80,6 +94,9 @@ struct XRayRecord {
/// The function call arguments.
std::vector<uint64_t> CallArgs;
+
+ /// For custom and typed events, we provide the raw data from the trace.
+ std::string Data;
};
} // namespace xray
diff --git a/contrib/llvm/include/llvm/XRay/YAMLXRayRecord.h b/contrib/llvm/include/llvm/XRay/YAMLXRayRecord.h
index 0de9ea0968e6..6150196ed98d 100644
--- a/contrib/llvm/include/llvm/XRay/YAMLXRayRecord.h
+++ b/contrib/llvm/include/llvm/XRay/YAMLXRayRecord.h
@@ -39,6 +39,7 @@ struct YAMLXRayRecord {
uint32_t TId;
uint32_t PId;
std::vector<uint64_t> CallArgs;
+ std::string Data;
};
struct YAMLXRayTrace {
@@ -58,6 +59,8 @@ template <> struct ScalarEnumerationTraits<xray::RecordTypes> {
IO.enumCase(Type, "function-exit", xray::RecordTypes::EXIT);
IO.enumCase(Type, "function-tail-exit", xray::RecordTypes::TAIL_EXIT);
IO.enumCase(Type, "function-enter-arg", xray::RecordTypes::ENTER_ARG);
+ IO.enumCase(Type, "custom-event", xray::RecordTypes::CUSTOM_EVENT);
+ IO.enumCase(Type, "typed-event", xray::RecordTypes::TYPED_EVENT);
}
};
@@ -73,16 +76,16 @@ template <> struct MappingTraits<xray::YAMLXRayFileHeader> {
template <> struct MappingTraits<xray::YAMLXRayRecord> {
static void mapping(IO &IO, xray::YAMLXRayRecord &Record) {
- // FIXME: Make this type actually be descriptive
IO.mapRequired("type", Record.RecordType);
- IO.mapRequired("func-id", Record.FuncId);
+ IO.mapOptional("func-id", Record.FuncId);
IO.mapOptional("function", Record.Function);
IO.mapOptional("args", Record.CallArgs);
IO.mapRequired("cpu", Record.CPU);
- IO.mapRequired("thread", Record.TId);
+ IO.mapOptional("thread", Record.TId, 0U);
IO.mapOptional("process", Record.PId, 0U);
IO.mapRequired("kind", Record.Type);
IO.mapRequired("tsc", Record.TSC);
+ IO.mapOptional("data", Record.Data);
}
static constexpr bool flow = true;
diff --git a/contrib/llvm/include/llvm/module.extern.modulemap b/contrib/llvm/include/llvm/module.extern.modulemap
new file mode 100644
index 000000000000..8acda137e044
--- /dev/null
+++ b/contrib/llvm/include/llvm/module.extern.modulemap
@@ -0,0 +1,5 @@
+module LLVM_Extern_Config_Def {}
+module LLVM_Extern_IR_Attributes_Gen {}
+module LLVM_Extern_IR_Intrinsics_Gen {}
+module LLVM_Extern_IR_Intrinsics_Enum {}
+module LLVM_Extern_Utils_DataTypes {}
diff --git a/contrib/llvm/include/llvm/module.install.modulemap b/contrib/llvm/include/llvm/module.install.modulemap
new file mode 100644
index 000000000000..ac73a8612326
--- /dev/null
+++ b/contrib/llvm/include/llvm/module.install.modulemap
@@ -0,0 +1,27 @@
+
+module LLVM_Extern_Config_Def {
+ textual header "Config/AsmParsers.def"
+ textual header "Config/AsmPrinters.def"
+ textual header "Config/Disassemblers.def"
+ textual header "Config/Targets.def"
+ export *
+}
+
+module LLVM_Extern_IR_Attributes_Gen {
+ textual header "IR/Attributes.gen"
+ textual header "IR/Attributes.inc"
+}
+
+module LLVM_Extern_IR_Intrinsics_Gen {
+ textual header "IR/Intrinsics.gen"
+ textual header "IR/Intrinsics.inc"
+}
+
+module LLVM_Extern_IR_Intrinsics_Enum {
+ textual header "IR/IntrinsicEnums.inc"
+}
+
+module LLVM_Extern_Utils_DataTypes {
+ header "Support/DataTypes.h"
+ export *
+}
diff --git a/contrib/llvm/include/llvm/module.modulemap b/contrib/llvm/include/llvm/module.modulemap
index 649cdf3b0a89..bcc12534ec85 100644
--- a/contrib/llvm/include/llvm/module.modulemap
+++ b/contrib/llvm/include/llvm/module.modulemap
@@ -7,7 +7,11 @@ module LLVM_Analysis {
textual header "Analysis/TargetLibraryInfo.def"
}
-module LLVM_AsmParser { requires cplusplus umbrella "AsmParser" module * { export * } }
+module LLVM_AsmParser {
+ requires cplusplus
+ umbrella "AsmParser"
+ module * { export * }
+}
// A module covering CodeGen/ and Target/. These are intertwined
// and codependent, and thus notionally form a single module.
@@ -27,14 +31,21 @@ module LLVM_Backend {
textual header "CodeGen/CommandFlags.inc"
textual header "CodeGen/DIEValue.def"
}
+}
- module Target {
- umbrella "Target"
- module * { export * }
- }
+// FIXME: Make this as a submodule of LLVM_Backend again.
+// Doing so causes a linker error in clang-format.
+module LLVM_Backend_Target {
+ umbrella "Target"
+ module * { export * }
+}
+
+module LLVM_Bitcode {
+ requires cplusplus
+ umbrella "Bitcode"
+ module * { export * }
}
-module LLVM_Bitcode { requires cplusplus umbrella "Bitcode" module * { export * } }
module LLVM_BinaryFormat {
requires cplusplus
@@ -52,6 +63,7 @@ module LLVM_BinaryFormat {
textual header "BinaryFormat/ELFRelocs/i386.def"
textual header "BinaryFormat/ELFRelocs/Lanai.def"
textual header "BinaryFormat/ELFRelocs/Mips.def"
+ textual header "BinaryFormat/ELFRelocs/MSP430.def"
textual header "BinaryFormat/ELFRelocs/PowerPC64.def"
textual header "BinaryFormat/ELFRelocs/PowerPC.def"
textual header "BinaryFormat/ELFRelocs/RISCV.def"
@@ -59,9 +71,15 @@ module LLVM_BinaryFormat {
textual header "BinaryFormat/ELFRelocs/SystemZ.def"
textual header "BinaryFormat/ELFRelocs/x86_64.def"
textual header "BinaryFormat/WasmRelocs.def"
+ textual header "BinaryFormat/MsgPack.def"
}
-module LLVM_Config { requires cplusplus umbrella "Config" module * { export * } }
+module LLVM_Config {
+ requires cplusplus
+ umbrella "Config"
+ extern module LLVM_Extern_Config_Def "module.extern.modulemap"
+ module * { export * }
+}
module LLVM_DebugInfo {
requires cplusplus
@@ -87,12 +105,14 @@ module LLVM_DebugInfo_PDB {
// FIXME: There should be a better way to specify this.
exclude header "DebugInfo/PDB/DIA/DIADataStream.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumDebugStreams.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumFrameData.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumInjectedSources.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumLineNumbers.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumSectionContribs.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumSourceFiles.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumSymbols.h"
exclude header "DebugInfo/PDB/DIA/DIAEnumTables.h"
+ exclude header "DebugInfo/PDB/DIA/DIAFrameData.h"
exclude header "DebugInfo/PDB/DIA/DIAInjectedSource.h"
exclude header "DebugInfo/PDB/DIA/DIALineNumber.h"
exclude header "DebugInfo/PDB/DIA/DIARawSymbol.h"
@@ -177,7 +197,11 @@ module LLVM_intrinsic_gen {
// Attributes.h
module IR_Argument { header "IR/Argument.h" export * }
- module IR_Attributes { header "IR/Attributes.h" export * }
+ module IR_Attributes {
+ header "IR/Attributes.h"
+ extern module LLVM_Extern_IR_Attributes_Gen "module.extern.modulemap"
+ export *
+ }
module IR_CallSite { header "IR/CallSite.h" export * }
module IR_ConstantFolder { header "IR/ConstantFolder.h" export * }
module IR_GlobalVariable { header "IR/GlobalVariable.h" export * }
@@ -192,6 +216,7 @@ module LLVM_intrinsic_gen {
// Intrinsics.h
module IR_CFG { header "IR/CFG.h" export * }
+ module IR_CFGDiff { header "IR/CFGDiff.h" export * }
module IR_ConstantRange { header "IR/ConstantRange.h" export * }
module IR_Dominators { header "IR/Dominators.h" export * }
module Analysis_PostDominators { header "Analysis/PostDominators.h" export * }
@@ -202,7 +227,12 @@ module LLVM_intrinsic_gen {
module IR_Verifier { header "IR/Verifier.h" export * }
module IR_InstIterator { header "IR/InstIterator.h" export * }
module IR_InstVisitor { header "IR/InstVisitor.h" export * }
- module IR_Intrinsics { header "IR/Intrinsics.h" export * }
+ module IR_Intrinsics {
+ header "IR/Intrinsics.h"
+    extern module LLVM_Extern_IR_Intrinsics_Gen "module.extern.modulemap"
+ extern module LLVM_Extern_IR_Intrinsics_Enum "module.extern.modulemap"
+ export *
+ }
module IR_IntrinsicInst { header "IR/IntrinsicInst.h" export * }
module IR_PatternMatch { header "IR/PatternMatch.h" export * }
module IR_Statepoint { header "IR/Statepoint.h" export * }
@@ -224,9 +254,23 @@ module LLVM_IR {
textual header "IR/RuntimeLibcalls.def"
}
-module LLVM_IRReader { requires cplusplus umbrella "IRReader" module * { export * } }
-module LLVM_LineEditor { requires cplusplus umbrella "LineEditor" module * { export * } }
-module LLVM_LTO { requires cplusplus umbrella "LTO" module * { export * } }
+module LLVM_IRReader {
+ requires cplusplus
+ umbrella "IRReader"
+ module * { export * }
+}
+
+module LLVM_LineEditor {
+ requires cplusplus
+ umbrella "LineEditor"
+ module * { export * }
+}
+
+module LLVM_LTO {
+ requires cplusplus
+ umbrella "LTO"
+ module * { export * }
+}
module LLVM_MC {
requires cplusplus
@@ -253,7 +297,11 @@ module LLVM_Object {
module * { export * }
}
-module LLVM_Option { requires cplusplus umbrella "Option" module * { export * } }
+module LLVM_Option {
+ requires cplusplus
+ umbrella "Option"
+ module * { export * }
+}
module LLVM_ProfileData {
requires cplusplus
@@ -271,7 +319,11 @@ module LLVM_Support_TargetRegistry {
export *
}
-module LLVM_TableGen { requires cplusplus umbrella "TableGen" module * { export * } }
+module LLVM_TableGen {
+ requires cplusplus
+ umbrella "TableGen"
+ module * { export * }
+}
module LLVM_Transforms {
requires cplusplus
@@ -279,6 +331,8 @@ module LLVM_Transforms {
module * { export * }
}
+extern module LLVM_Extern_Utils_DataTypes "module.extern.modulemap"
+
// A module covering ADT/ and Support/. These are intertwined and
// codependent, and notionally form a single module.
module LLVM_Utils {